commit 526dafd8944bc2b4b4924120a5de0d1c73a2db44
Author: Kenan Gömek
Date:   Mon May 9 20:30:26 2022 +0200

    SA Kenan first init

diff --git a/01_Auswertung/Auswertung_Farbkanaele_Multiprocessing.py b/01_Auswertung/Auswertung_Farbkanaele_Multiprocessing.py
new file mode 100644
index 0000000..7dfe957
--- /dev/null
+++ b/01_Auswertung/Auswertung_Farbkanaele_Multiprocessing.py
@@ -0,0 +1,237 @@
+# Date: 18.12.2021
+# Author: Kenan Gömek
+# This script displays the different colour channels of the captured images and creates cuts to show the pixel values.
+# Update: 18.02.2022
+# Update Comment: Multiprocessing
+
+# Import needed packages
+import cv2 as cv
+import numpy as np
+import time
+from datetime import datetime
+import os
+from matplotlib import pyplot as plt
+
+from multiprocessing import Process, cpu_count, Pool
+
+# Define user input
+# No umlauts in the path name!
+PATH_IMAGE_FOLDER = r'U:\bwsyncshare\Auswertung'
+
+COMMENT = ""  # user comment for the plot
+
+
+# Define functions
+def calc_arithmetic_mean_of_brightness_per_pixel(r, g, b):
+    """Calculate the overall brightness per pixel of the image (arithmetic mean brightness per pixel)."""
+    r = r.astype('uint16')  # set the dtype of one colour channel to uint16, because the sum 255+255+255=765 exceeds the uint8 range;
+                            # the other channels are promoted to uint16 as well
+    image_heigth = r.shape[0]
+    image_width = r.shape[1]
+
+    number_of_colour_channels = 3
+    arithmetic_mean_of_brightness_image = np.sum((r+g+b)/number_of_colour_channels)
+    arithmetic_mean_of_brightness_per_pixel = arithmetic_mean_of_brightness_image/(image_width*image_heigth)
+
+    max_possible_brightness = 255  # maximum possible brightness for uint8
+    arithmetic_mean_of_brightness_per_pixel_relative = arithmetic_mean_of_brightness_per_pixel/max_possible_brightness  # range: 0-1
+
+    return arithmetic_mean_of_brightness_per_pixel_relative
+
+
+def image_analysis(image_name, image_number, number_of_total_images, comment):
+    print(f"Current image {image_number+1}/{number_of_total_images}: {image_name}")
+
+    # Split and merge colour channels
+    img_bgr = cv.imread(os.path.join(PATH_IMAGE_FOLDER, image_name), cv.IMREAD_COLOR)
+    b, g, r = cv.split(img_bgr)
+    img_rgb = img_bgr[:, :, ::-1]
+
+    # Create cuts and display pixel values
+    image_width = r.shape[1]
+    if image_width < 1000:
+        plot_marker_offset = 4  # thickness of the marker line in the plot in pixels
+    elif image_width >= 1000:
+        plot_marker_offset = 10  # thickness of the marker line in the plot in pixels
+
+    # Identify the dominating colour channel and set the cut row and column
+    max_pixelvalue_red = r.max(); max_pixelvalue_green = g.max(); max_pixelvalue_blue = b.max()
+    max_pixelvalue = np.max([max_pixelvalue_red, max_pixelvalue_green, max_pixelvalue_blue])
+
+    idx_max_pixelvalue_red = np.unravel_index(r.argmax(), r.shape)  # row=idx_..[0] column=idx_..[1]
+    idx_max_pixelvalue_green = np.unravel_index(g.argmax(), g.shape)  # row=idx_..[0] column=idx_..[1]
+    idx_max_pixelvalue_blue = np.unravel_index(b.argmax(), b.shape)  # row=idx_..[0] column=idx_..[1]
+    if max_pixelvalue == max_pixelvalue_red:
+        idx_max_pixelvalue = idx_max_pixelvalue_red; msg_dominating_colourchannel = 'red'
+    elif max_pixelvalue == max_pixelvalue_green:
+        idx_max_pixelvalue = idx_max_pixelvalue_green; msg_dominating_colourchannel = 'green'
+    elif max_pixelvalue == max_pixelvalue_blue:
+        idx_max_pixelvalue = idx_max_pixelvalue_blue; msg_dominating_colourchannel = 'blue'
+
+    cut_row = idx_max_pixelvalue[0]; cut_column = idx_max_pixelvalue[1]
+
+    # Red channel
+    # Info linspace(start, stop, num):
+    # stop: the end value of the sequence --> stop is included
+    # num: number of samples to generate
+    # Because the array starts at index 0: e.g. for an image with 3280x2464 pixels you need 3280 values between 0 and 3279
+    # check: x_red_h must have 3280 values --> yes, from 0 to 3279 in steps of 1
+    x_red_h = np.linspace(0, r.shape[1]-1, r.shape[1]); y_red_h = r[cut_row,:]  # data horizontal cut ->width (e.g. 3280)
+    x_red_v = np.linspace(0, r.shape[0]-1, r.shape[0]); y_red_v = r[:,cut_column]  # data vertical cut ->height (e.g. 2464)
+
+    msg1_red = f"Maximum pixel value in red channel: {max_pixelvalue_red}"; print(msg1_red)
+    msg2_red = f"Index of max value in red channel (row, column): {idx_max_pixelvalue_red}"; print(msg2_red)
+    msg3_red = f"Maximum pixel value in marked row {cut_row}: {np.max(y_red_h)}"; print(msg3_red)
+    msg4_red = f"Maximum pixel value in marked column {cut_column}: {np.max(y_red_v)}"; print(msg4_red)
+    r_copy = r.copy(); r_copy[cut_row:cut_row+plot_marker_offset,:] = 255; r_copy[:, cut_column:cut_column+plot_marker_offset] = 255  # manipulate the image to display the marks in the marked plot
+
+    # Green channel
+    x_green_h = np.linspace(0, g.shape[1]-1, g.shape[1]); y_green_h = g[cut_row,:]  # data horizontal cut
+    x_green_v = np.linspace(0, g.shape[0]-1, g.shape[0]); y_green_v = g[:,cut_column]  # data vertical cut
+
+    msg1_green = f"Maximum pixel value in green channel: {max_pixelvalue_green}"; print(msg1_green)
+    msg2_green = f"Index of max value in green channel (row, column): {idx_max_pixelvalue_green}"; print(msg2_green)
+    msg3_green = f"Maximum pixel value in marked row {cut_row}: {np.max(y_green_h)}"; print(msg3_green)
+    msg4_green = f"Maximum pixel value in marked column {cut_column}: {np.max(y_green_v)}"; print(msg4_green)
+    g_copy = g.copy(); g_copy[cut_row:cut_row+plot_marker_offset,:] = 255; g_copy[:, cut_column:cut_column+plot_marker_offset] = 255  # manipulate the image to display the marks in the marked plot
+
+    # Blue channel
+    x_blue_h = np.linspace(0, b.shape[1]-1, b.shape[1]); y_blue_h = b[cut_row,:]  # data horizontal cut
+    x_blue_v = np.linspace(0, b.shape[0]-1, b.shape[0]); y_blue_v = b[:,cut_column]  # data vertical cut
+
+    msg1_blue = f"Maximum pixel value in blue channel: {max_pixelvalue_blue}"; print(msg1_blue)
+    msg2_blue = f"Index of max value in blue channel (row, column): {idx_max_pixelvalue_blue}"; print(msg2_blue)
+    msg3_blue = f"Maximum pixel value in marked row {cut_row}: {np.max(y_blue_h)}"; print(msg3_blue)
+    msg4_blue = f"Maximum pixel value in marked column {cut_column}: {np.max(y_blue_v)}"; print(msg4_blue)
+    b_copy = b.copy(); b_copy[cut_row:cut_row+plot_marker_offset,:] = 255; b_copy[:, cut_column:cut_column+plot_marker_offset] = 255  # manipulate the image to display the marks in the marked plot
+
+    # Create plots
+    fig1, ((ax_orig_1, ax01, ax02, ax03), (ax_red_1, ax_red_2, ax_red_3, ax_red_4),
+           (ax_green_1, ax_green_2, ax_green_3, ax_green_4), (ax_blue_1, ax_blue_2, ax_blue_3, ax_blue_4)) \
+        = plt.subplots(4, 4, figsize=(30, 25))
+    fig1.suptitle(f'Image: {image_name}', y=0.9)
+
+    yticks = np.append(np.arange(0, 230, 25), 255)  # set yticks for the cuts
+    xlim_max_h = r.shape[1]  # xlim for the horizontal cut; no special reason for choosing the red channel
+    xlim_max_v = r.shape[0]  # xlim for the vertical cut; no special reason for choosing the red channel
+
+    ax_orig_1.imshow(img_rgb); ax_orig_1.set_title("Original Image")
+    ax_orig_1.set_xlabel('Width=H=Columns'); ax_orig_1.set_ylabel('Height=V=Rows')
+
+    # red channel
+    ax_red_1.imshow(r, cmap='gray'); ax_red_1.set_title("Red Channel")
+    ax_red_1.set_xlabel('Width=H=Columns'); ax_red_1.set_ylabel('Height=V=Rows')
+    ax_red_2.imshow(r_copy, cmap='gray'); ax_red_2.set_title("Red Channel - marked")
+    ax_red_2.set_xlabel('Width=H=Columns'); ax_red_2.set_ylabel('Height=V=Rows')
+    ax_red_3.plot(x_red_h, y_red_h, linewidth=2.0); ax_red_3.set_title("Horizontal Cut")
+    ax_red_3.grid(True); ax_red_3.set_ylim(0, 260); ax_red_3.set_yticks(yticks); ax_red_3.set_xlim(0, xlim_max_h)
+    ax_red_3.set_xlabel('Width=H=Columns'); ax_red_3.set_ylabel('Pixel Value')
+    ax_red_4.plot(x_red_v, y_red_v, linewidth=2.0); ax_red_4.set_title("Vertical Cut")
+    ax_red_4.grid(True); ax_red_4.set_ylim(0, 260); ax_red_4.set_yticks(yticks); ax_red_4.set_xlim(0, xlim_max_v)
+    ax_red_4.set_xlabel('Height=V=Rows'); ax_red_4.set_ylabel('Pixel Value')
+
+    # green channel
+    ax_green_1.imshow(g, cmap='gray'); ax_green_1.set_title("Green Channel")
+    ax_green_1.set_xlabel('Width=H=Columns'); ax_green_1.set_ylabel('Height=V=Rows')
+    ax_green_2.imshow(g_copy, cmap='gray'); ax_green_2.set_title("Green Channel - marked")
+    ax_green_2.set_xlabel('Width=H=Columns'); ax_green_2.set_ylabel('Height=V=Rows')
+    ax_green_3.plot(x_green_h, y_green_h, linewidth=2.0); ax_green_3.set_title("Horizontal Cut")
+    ax_green_3.grid(True); ax_green_3.set_ylim(0, 260); ax_green_3.set_yticks(yticks); ax_green_3.set_xlim(0, xlim_max_h)
+    ax_green_3.set_xlabel('Width=H=Columns'); ax_green_3.set_ylabel('Pixel Value')
+    ax_green_4.plot(x_green_v, y_green_v, linewidth=2.0); ax_green_4.set_title("Vertical Cut")
+    ax_green_4.grid(True); ax_green_4.set_ylim(0, 260); ax_green_4.set_yticks(yticks); ax_green_4.set_xlim(0, xlim_max_v)
+    ax_green_4.set_xlabel('Height=V=Rows'); ax_green_4.set_ylabel('Pixel Value')
+
+    # blue channel
+    ax_blue_1.imshow(b, cmap='gray'); ax_blue_1.set_title("Blue Channel")
+    ax_blue_1.set_xlabel('Width=H=Columns'); ax_blue_1.set_ylabel('Height=V=Rows')
+    ax_blue_2.imshow(b_copy, cmap='gray'); ax_blue_2.set_title("Blue Channel - marked")
+    ax_blue_2.set_xlabel('Width=H=Columns'); ax_blue_2.set_ylabel('Height=V=Rows')
+    ax_blue_3.plot(x_blue_h, y_blue_h, linewidth=2.0); ax_blue_3.set_title("Horizontal Cut")
+    ax_blue_3.grid(True); ax_blue_3.set_ylim(0, 260); ax_blue_3.set_yticks(yticks); ax_blue_3.set_xlim(0, xlim_max_h)
+    ax_blue_3.set_xlabel('Width=H=Columns'); ax_blue_3.set_ylabel('Pixel Value')
+    ax_blue_4.plot(x_blue_v, y_blue_v, linewidth=2.0); ax_blue_4.set_title("Vertical Cut")
+    ax_blue_4.grid(True); ax_blue_4.set_ylim(0, 260); ax_blue_4.set_yticks(yticks); ax_blue_4.set_xlim(0, xlim_max_v)
+    ax_blue_4.set_xlabel('Height=V=Rows'); ax_blue_4.set_ylabel('Pixel Value')
+
+    # Calculate the overall brightness per pixel of the image
+    arithmetic_mean_of_brightness_per_pixel_relative = calc_arithmetic_mean_of_brightness_per_pixel(r, g, b)
+    if arithmetic_mean_of_brightness_per_pixel_relative >= 0.01:
+        msg1_brightness = f"Overall brightness per pixel: {round(arithmetic_mean_of_brightness_per_pixel_relative*100, 2)} %"  # value in percent
+    elif arithmetic_mean_of_brightness_per_pixel_relative < 0.01:
+        msg1_brightness = f"Overall brightness per pixel: {round(arithmetic_mean_of_brightness_per_pixel_relative*1e3, 2)} per mil"  # value in per mil
+    print(msg1_brightness)
+
+    # add the pixel stats under the figure
+    pixelstats_red = '\n'.join([msg1_red, msg2_red, msg3_red, msg4_red])
+    pixelstats_green = '\n'.join([msg1_green, msg2_green, msg3_green, msg4_green])
+    pixelstats_blue = '\n'.join([msg1_blue, msg2_blue, msg3_blue, msg4_blue])
+    pixelstats_overall_brightness = '\n'.join([msg1_brightness])
+    pixelstats = '\n\n'.join([f"pixel stats: {msg_dominating_colourchannel} channel dominating",
+                              pixelstats_red, pixelstats_green, pixelstats_blue, pixelstats_overall_brightness])
+    text_x_pos = 0.1; text_y_pos = -0.015  # text position: (0,0) is lower-left and (1,1) is upper-right
+    fig1.text(text_x_pos, text_y_pos, pixelstats, ha='left')
+
+    # add the comment under the figure
+    text_x_pos = 0.5; text_y_pos = -0.025  # text position: (0,0) is lower-left and (1,1) is upper-right
+    log_filename = None
+    try:
+        log_filename = [f for f in os.listdir(PATH_IMAGE_FOLDER) if f.endswith('.txt')][0]
+        if log_filename:
+            with open(os.path.join(PATH_IMAGE_FOLDER, log_filename), encoding='utf-8') as f:
+                log_text = f.readlines()
+            if comment != "":
+                comment = '\nPlot Comment: ' + comment
+                log_text.append(comment)
+            txt = ''.join(log_text)
+            fig1.text(text_x_pos, text_y_pos, txt, ha='left')
+    except IndexError:
+        if comment != "":
+            comment = '\nPlot Comment: ' + comment
+            fig1.text(text_x_pos, text_y_pos, comment, ha='left')
+
+    # handle the numpy memory error on Windows:
+    switch_overwrite = 0  # do not overwrite files if they already exist
+    if switch_overwrite == 1:
+        fig1.savefig(os.path.join(PATH_IMAGE_FOLDER, f'{image_name}.pdf'), bbox_inches='tight')  # save the plot
+        print('Save pdf')
+    else:
+        if os.path.isfile(os.path.join(PATH_IMAGE_FOLDER, f'{image_name}.pdf')):
+            pass  # skip this pdf file, because it already exists
+        else:
+            fig1.savefig(os.path.join(PATH_IMAGE_FOLDER, f'{image_name}.pdf'), bbox_inches='tight')  # save the plot
+            print('Save pdf')
+    plt.close('all')  # close all figures
+
+    print('')  # new line for better readability in the console
+
+
+# Start
+def main():
+    number_of_CPUS = cpu_count()
+
+    image_filenames = [f for f in os.listdir(PATH_IMAGE_FOLDER) if f.endswith('.png')]
+    image_filenames.sort()
+    list_numbers = [x for x in range(len(image_filenames))]
+    number_of_total_images = len(image_filenames)
+    list_number_of_total_images = [number_of_total_images]*number_of_total_images  # list with n times the number of total images
+    list_COMMENT = [COMMENT]*number_of_total_images
+
+    print(number_of_total_images)
+    print(len(list_numbers))
+
+    data_to_pass = list(zip(image_filenames, list_numbers, list_number_of_total_images, list_COMMENT))
+    with Pool(number_of_CPUS) as pool:
+        pool.starmap(image_analysis, iterable=data_to_pass)
+
+    t1 = round(time.perf_counter()/60, 2)
+    print(f'Script finished in {t1} min')
+
+if __name__ == '__main__':
+    main()
+
diff --git a/01_Auswertung/Auswertung_Farbkanaele_V01-00.py b/01_Auswertung/Auswertung_Farbkanaele_V01-00.py
new file mode 100644
index 0000000..60df8ca
--- /dev/null
+++ b/01_Auswertung/Auswertung_Farbkanaele_V01-00.py
@@ -0,0 +1,222 @@
+# Date: 18.12.2021
+# Author: Kenan Gömek
+# This script displays the different colour channels of the captured images and creates cuts to show the pixel values.
+# Update: 21.12.2021
+# Update Comment: The dominating colour channel is detected automatically and the row and column markers are all
+# aligned to the brightest point in the dominating colour channel
+
+# Import needed packages
+import cv2 as cv
+import numpy as np
+import time
+from datetime import datetime
+import os
+from matplotlib import pyplot as plt
+
+# Define user input
+# No umlauts in the path name!
+PATH_IMAGE_FOLDER = r'U:\bwsyncshare\Auswertung'
+COMMENT = ""  # user comment for the plot
+
+# Select the cut mode for determining the pixel values
+cut_mode = 'auto'  # options: 'auto', 'manual'
+
+ROW = 152  # row number for the cut [pixels] for cut_mode 'manual'; index begins at 0
+COLUMN = 1742  # column number for the cut [pixels] for cut_mode 'manual'; index begins at 0
+
+
+# Define functions
+def calc_arithmetic_mean_of_brightness_per_pixel(r, g, b):
+    """Calculate the overall brightness per pixel of the image (arithmetic mean brightness per pixel)."""
+    r = r.astype('uint16')  # set the dtype of one colour channel to uint16, because the sum 255+255+255=765 exceeds the uint8 range;
+                            # the other channels are promoted to uint16 as well
+    image_heigth = r.shape[0]
+    image_width = r.shape[1]
+
+    number_of_colour_channels = 3
+    arithmetic_mean_of_brightness_image = np.sum((r+g+b)/number_of_colour_channels)
+    arithmetic_mean_of_brightness_per_pixel = arithmetic_mean_of_brightness_image/(image_width*image_heigth)
+
+    max_possible_brightness = 255  # maximum possible brightness for uint8
+    arithmetic_mean_of_brightness_per_pixel_relative = arithmetic_mean_of_brightness_per_pixel/max_possible_brightness  # range: 0-1
+
+    return arithmetic_mean_of_brightness_per_pixel_relative
+
+
+# Start
+image_filenames = [f for f in os.listdir(PATH_IMAGE_FOLDER) if f.endswith('.png')]
+image_filenames.sort()
+for image_number, image_name in enumerate(image_filenames):
+    print(f"Current image {image_number+1}/{len(image_filenames)}: {image_name}")
+
+    # Split and merge colour channels
+    img_bgr = cv.imread(os.path.join(PATH_IMAGE_FOLDER, image_name), cv.IMREAD_COLOR)
+    b, g, r = cv.split(img_bgr)
+    img_rgb = img_bgr[:, :, ::-1]
+
+    # Create cuts and display pixel values
+    image_width = r.shape[1]
+    if image_width < 1000:
+        plot_marker_offset = 4  # thickness of the marker line in the plot in pixels
+    elif image_width >= 1000:
+        plot_marker_offset = 10  # thickness of the marker line in the plot in pixels
+
+    # Identify the dominating colour channel and set the cut row and column
+    max_pixelvalue_red = r.max(); max_pixelvalue_green = g.max(); max_pixelvalue_blue = b.max()
+    max_pixelvalue = np.max([max_pixelvalue_red, max_pixelvalue_green, max_pixelvalue_blue])
+
+    idx_max_pixelvalue_red = np.unravel_index(r.argmax(), r.shape)  # row=idx_..[0] column=idx_..[1]
+    idx_max_pixelvalue_green = np.unravel_index(g.argmax(), g.shape)  # row=idx_..[0] column=idx_..[1]
+    idx_max_pixelvalue_blue = np.unravel_index(b.argmax(), b.shape)  # row=idx_..[0] column=idx_..[1]
+    if max_pixelvalue == max_pixelvalue_red:
+        idx_max_pixelvalue = idx_max_pixelvalue_red; msg_dominating_colourchannel = 'red'
+    elif max_pixelvalue == max_pixelvalue_green:
+        idx_max_pixelvalue = idx_max_pixelvalue_green; msg_dominating_colourchannel = 'green'
+    elif max_pixelvalue == max_pixelvalue_blue:
+        idx_max_pixelvalue = idx_max_pixelvalue_blue; msg_dominating_colourchannel = 'blue'
+
+    if cut_mode == 'auto':
+        cut_row = idx_max_pixelvalue[0]; cut_column = idx_max_pixelvalue[1]
+    elif cut_mode == 'manual':
+        cut_row = ROW; cut_column = COLUMN
+    else:
+        print('Wrong cut_mode. Ending script.'); quit()
+
+    # Red channel
+    # Info linspace(start, stop, num):
+    # stop: the end value of the sequence --> stop is included
+    # num: number of samples to generate
+    # Because the array starts at index 0: e.g. for an image with 3280x2464 pixels you need 3280 values between 0 and 3279
+    # check: x_red_h must have 3280 values --> yes, from 0 to 3279 in steps of 1
+    x_red_h = np.linspace(0, r.shape[1]-1, r.shape[1]); y_red_h = r[cut_row,:]  # data horizontal cut ->width (e.g. 3280)
+    x_red_v = np.linspace(0, r.shape[0]-1, r.shape[0]); y_red_v = r[:,cut_column]  # data vertical cut ->height (e.g. 2464)
+
+    msg1_red = f"Maximum pixel value in red channel: {max_pixelvalue_red}"; print(msg1_red)
+    msg2_red = f"Index of max value in red channel (row, column): {idx_max_pixelvalue_red}"; print(msg2_red)
+    msg3_red = f"Maximum pixel value in marked row {cut_row}: {np.max(y_red_h)}"; print(msg3_red)
+    msg4_red = f"Maximum pixel value in marked column {cut_column}: {np.max(y_red_v)}"; print(msg4_red)
+    r_copy = r.copy(); r_copy[cut_row:cut_row+plot_marker_offset,:] = 255; r_copy[:, cut_column:cut_column+plot_marker_offset] = 255  # manipulate the image to display the marks in the marked plot
+
+    # Green channel
+    x_green_h = np.linspace(0, g.shape[1]-1, g.shape[1]); y_green_h = g[cut_row,:]  # data horizontal cut
+    x_green_v = np.linspace(0, g.shape[0]-1, g.shape[0]); y_green_v = g[:,cut_column]  # data vertical cut
+
+    msg1_green = f"Maximum pixel value in green channel: {max_pixelvalue_green}"; print(msg1_green)
+    msg2_green = f"Index of max value in green channel (row, column): {idx_max_pixelvalue_green}"; print(msg2_green)
+    msg3_green = f"Maximum pixel value in marked row {cut_row}: {np.max(y_green_h)}"; print(msg3_green)
+    msg4_green = f"Maximum pixel value in marked column {cut_column}: {np.max(y_green_v)}"; print(msg4_green)
+    g_copy = g.copy(); g_copy[cut_row:cut_row+plot_marker_offset,:] = 255; g_copy[:, cut_column:cut_column+plot_marker_offset] = 255  # manipulate the image to display the marks in the marked plot
+
+    # Blue channel
+    x_blue_h = np.linspace(0, b.shape[1]-1, b.shape[1]); y_blue_h = b[cut_row,:]  # data horizontal cut
+    x_blue_v = np.linspace(0, b.shape[0]-1, b.shape[0]); y_blue_v = b[:,cut_column]  # data vertical cut
+
+    msg1_blue = f"Maximum pixel value in blue channel: {max_pixelvalue_blue}"; print(msg1_blue)
+    msg2_blue = f"Index of max value in blue channel (row, column): {idx_max_pixelvalue_blue}"; print(msg2_blue)
+    msg3_blue = f"Maximum pixel value in marked row {cut_row}: {np.max(y_blue_h)}"; print(msg3_blue)
+    msg4_blue = f"Maximum pixel value in marked column {cut_column}: {np.max(y_blue_v)}"; print(msg4_blue)
+    b_copy = b.copy(); b_copy[cut_row:cut_row+plot_marker_offset,:] = 255; b_copy[:, cut_column:cut_column+plot_marker_offset] = 255  # manipulate the image to display the marks in the marked plot
+
+    # Create plots
+    fig1, ((ax_orig_1, ax01, ax02, ax03), (ax_red_1, ax_red_2, ax_red_3, ax_red_4),
+           (ax_green_1, ax_green_2, ax_green_3, ax_green_4), (ax_blue_1, ax_blue_2, ax_blue_3, ax_blue_4)) \
+        = plt.subplots(4, 4, figsize=(30, 25))
+    fig1.suptitle(f'Image: {image_name}', y=0.9)
+
+    yticks = np.append(np.arange(0, 230, 25), 255)  # set yticks for the cuts
+    xlim_max_h = r.shape[1]  # xlim for the horizontal cut; no special reason for choosing the red channel
+    xlim_max_v = r.shape[0]  # xlim for the vertical cut; no special reason for choosing the red channel
+
+    ax_orig_1.imshow(img_rgb); ax_orig_1.set_title("Original Image")
+    ax_orig_1.set_xlabel('Width=H=Columns'); ax_orig_1.set_ylabel('Height=V=Rows')
+
+    # red channel
+    ax_red_1.imshow(r, cmap='gray'); ax_red_1.set_title("Red Channel")
+    ax_red_1.set_xlabel('Width=H=Columns'); ax_red_1.set_ylabel('Height=V=Rows')
+    ax_red_2.imshow(r_copy, cmap='gray'); ax_red_2.set_title("Red Channel - marked")
+    ax_red_2.set_xlabel('Width=H=Columns'); ax_red_2.set_ylabel('Height=V=Rows')
+    ax_red_3.plot(x_red_h, y_red_h, linewidth=2.0); ax_red_3.set_title("Horizontal Cut")
+    ax_red_3.grid(True); ax_red_3.set_ylim(0, 260); ax_red_3.set_yticks(yticks); ax_red_3.set_xlim(0, xlim_max_h)
+    ax_red_3.set_xlabel('Width=H=Columns'); ax_red_3.set_ylabel('Pixel Value')
+    ax_red_4.plot(x_red_v, y_red_v, linewidth=2.0); ax_red_4.set_title("Vertical Cut")
+    ax_red_4.grid(True); ax_red_4.set_ylim(0, 260); ax_red_4.set_yticks(yticks); ax_red_4.set_xlim(0, xlim_max_v)
+    ax_red_4.set_xlabel('Height=V=Rows'); ax_red_4.set_ylabel('Pixel Value')
+
+    # green channel
+    ax_green_1.imshow(g, cmap='gray'); ax_green_1.set_title("Green Channel")
+    ax_green_1.set_xlabel('Width=H=Columns'); ax_green_1.set_ylabel('Height=V=Rows')
+    ax_green_2.imshow(g_copy, cmap='gray'); ax_green_2.set_title("Green Channel - marked")
+    ax_green_2.set_xlabel('Width=H=Columns'); ax_green_2.set_ylabel('Height=V=Rows')
+    ax_green_3.plot(x_green_h, y_green_h, linewidth=2.0); ax_green_3.set_title("Horizontal Cut")
+    ax_green_3.grid(True); ax_green_3.set_ylim(0, 260); ax_green_3.set_yticks(yticks); ax_green_3.set_xlim(0, xlim_max_h)
+    ax_green_3.set_xlabel('Width=H=Columns'); ax_green_3.set_ylabel('Pixel Value')
+    ax_green_4.plot(x_green_v, y_green_v, linewidth=2.0); ax_green_4.set_title("Vertical Cut")
+    ax_green_4.grid(True); ax_green_4.set_ylim(0, 260); ax_green_4.set_yticks(yticks); ax_green_4.set_xlim(0, xlim_max_v)
+    ax_green_4.set_xlabel('Height=V=Rows'); ax_green_4.set_ylabel('Pixel Value')
+
+    # blue channel
+    ax_blue_1.imshow(b, cmap='gray'); ax_blue_1.set_title("Blue Channel")
+    ax_blue_1.set_xlabel('Width=H=Columns'); ax_blue_1.set_ylabel('Height=V=Rows')
+    ax_blue_2.imshow(b_copy, cmap='gray'); ax_blue_2.set_title("Blue Channel - marked")
+    ax_blue_2.set_xlabel('Width=H=Columns'); ax_blue_2.set_ylabel('Height=V=Rows')
+    ax_blue_3.plot(x_blue_h, y_blue_h, linewidth=2.0); ax_blue_3.set_title("Horizontal Cut")
+    ax_blue_3.grid(True); ax_blue_3.set_ylim(0, 260); ax_blue_3.set_yticks(yticks); ax_blue_3.set_xlim(0, xlim_max_h)
+    ax_blue_3.set_xlabel('Width=H=Columns'); ax_blue_3.set_ylabel('Pixel Value')
+    ax_blue_4.plot(x_blue_v, y_blue_v, linewidth=2.0); ax_blue_4.set_title("Vertical Cut")
+    ax_blue_4.grid(True); ax_blue_4.set_ylim(0, 260); ax_blue_4.set_yticks(yticks); ax_blue_4.set_xlim(0, xlim_max_v)
+    ax_blue_4.set_xlabel('Height=V=Rows'); ax_blue_4.set_ylabel('Pixel Value')
+
+    # Calculate the overall brightness per pixel of the image
+    arithmetic_mean_of_brightness_per_pixel_relative = calc_arithmetic_mean_of_brightness_per_pixel(r, g, b)
+    if arithmetic_mean_of_brightness_per_pixel_relative >= 0.01:
+        msg1_brightness = f"Overall brightness per pixel: {round(arithmetic_mean_of_brightness_per_pixel_relative*100, 2)} %"  # value in percent
+    elif arithmetic_mean_of_brightness_per_pixel_relative < 0.01:
+        msg1_brightness = f"Overall brightness per pixel: {round(arithmetic_mean_of_brightness_per_pixel_relative*1e3, 2)} per mil"  # value in per mil
+    print(msg1_brightness)
+
+    # add the pixel stats under the figure
+    pixelstats_red = '\n'.join([msg1_red, msg2_red, msg3_red, msg4_red])
+    pixelstats_green = '\n'.join([msg1_green, msg2_green, msg3_green, msg4_green])
+    pixelstats_blue = '\n'.join([msg1_blue, msg2_blue, msg3_blue, msg4_blue])
+    pixelstats_overall_brightness = '\n'.join([msg1_brightness])
+    pixelstats = '\n\n'.join([f"pixel stats: {msg_dominating_colourchannel} channel dominating",
+                              pixelstats_red, pixelstats_green, pixelstats_blue, pixelstats_overall_brightness])
+    text_x_pos = 0.1; text_y_pos = -0.015  # text position: (0,0) is lower-left and (1,1) is upper-right
+    fig1.text(text_x_pos, text_y_pos, pixelstats, ha='left')
+
+    # add the comment under the figure
+    text_x_pos = 0.5; text_y_pos = -0.025  # text position: (0,0) is lower-left and (1,1) is upper-right
+    log_filename = None
+    try:
+        log_filename = [f for f in os.listdir(PATH_IMAGE_FOLDER) if f.endswith('.txt')][0]
+        if log_filename:
+            with open(os.path.join(PATH_IMAGE_FOLDER, log_filename), encoding='utf-8') as f:
+                log_text = f.readlines()
+            if COMMENT != "":
+                log_text.append('\nPlot Comment: ' + COMMENT)  # leave COMMENT itself unchanged, otherwise the prefix accumulates on every image of the loop
+            txt = ''.join(log_text)
+            fig1.text(text_x_pos, text_y_pos, txt, ha='left')
+    except IndexError:
+        if COMMENT != "":
+            fig1.text(text_x_pos, text_y_pos, '\nPlot Comment: ' + COMMENT, ha='left')
+
+    # handle the numpy memory error on Windows:
+    switch_overwrite = 0  # do not overwrite files if they already exist
+    if switch_overwrite == 1:
+        fig1.savefig(os.path.join(PATH_IMAGE_FOLDER, f'{image_name}.pdf'), bbox_inches='tight')  # save the plot
+        print('Save pdf')
+    else:
+        if os.path.isfile(os.path.join(PATH_IMAGE_FOLDER, f'{image_name}.pdf')):
+            pass  # skip this pdf file, because it already exists
+        else:
+            fig1.savefig(os.path.join(PATH_IMAGE_FOLDER, f'{image_name}.pdf'), bbox_inches='tight')  # save the plot
+            print('Save pdf')
+    plt.close('all')  # close all figures
+
+    print('')  # new line for better readability in the console
\ No newline at end of file
diff --git a/01_How-To-Control-LEDs-Manually.txt b/01_How-To-Control-LEDs-Manually.txt
new file mode 100644
index 0000000..1a2e49f
--- /dev/null
+++ b/01_How-To-Control-LEDs-Manually.txt
@@ -0,0 +1,20 @@
+You have to connect the serial bus cable from the uC to the Raspberry.
+It was connected to the lower middle USB port in this case.
+
+1. start bash
+(2. workon cv-4.5.3.56)
+3. python
+4. import serial
+5. s = serial.Serial('/dev/ttyACM0', 9600)
+
+Now you can control the LEDs.
+
+To let the stripe light up in one colour:
+s.write(str.encode('RRR-GGG-BBB'))
+e.g.: s.write(str.encode('255-000-000'))
+
+To let the stripe light up in one of the 20 colour patterns:
+s.write(str.encode('color_pattern_XX')), where XX is 01-20
+e.g.: s.write(str.encode('color_pattern_01'))
+
+Now you can start a program to detect the LEDs.
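+
+For reference, the same commands as a minimal Python sketch (a sketch only;
+port and baud rate as above, the helper name is illustrative):
+
+import serial
+import time
+
+s = serial.Serial('/dev/ttyACM0', 9600)
+
+def set_colour(red, green, blue):
+    # send one 'RRR-GGG-BBB' command, zero-padded to three digits
+    s.write(str.encode(f"{red:03d}-{green:03d}-{blue:03d}"))
+
+set_colour(255, 0, 0)                    # whole stripe red, same as '255-000-000'
+time.sleep(2)
+s.write(str.encode('color_pattern_01'))  # one of the 20 colour patterns
+s.close()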
diff --git a/02_Kameraaufnahme/01_How-To-Start-Scripts.txt b/02_Kameraaufnahme/01_How-To-Start-Scripts.txt
new file mode 100644
index 0000000..1dadafc
--- /dev/null
+++ b/02_Kameraaufnahme/01_How-To-Start-Scripts.txt
@@ -0,0 +1,3 @@
+1. start bash
+2. workon cv-4.5.3.56
+3. python $SCRIPT_NAME$
\ No newline at end of file
diff --git a/02_Kameraaufnahme/PiCamera_shoot_manual_image.py b/02_Kameraaufnahme/PiCamera_shoot_manual_image.py
new file mode 100644
index 0000000..96af1c9
--- /dev/null
+++ b/02_Kameraaufnahme/PiCamera_shoot_manual_image.py
@@ -0,0 +1,188 @@
+# Creation Date: 02.03.2022
+# Author: Kenan Gömek
+# This script takes pictures with the PiCamera video port, as it will be used with OpenCV, and saves them with OpenCV to get real-use-case pictures.
+# This script is designed for shooting images manually with 'i'.
+# Press 'q' to exit.
+# Change camera parameters with v, b, n, m, x, c, o. See the code for more information.
+
+
+import cv2 as cv
+import picamera
+from picamera.array import PiRGBArray
+from fractions import Fraction
+
+import time
+from datetime import datetime
+import os
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+
+# Define camera settings
+
+# divide the original resolution by a number to keep the original aspect ratio
+# RESOLUTION = (3280, 2464)  # max photo resolution of CAM03 and CAM04; no image with the PiCamera video port at this resolution, probably GPU memory and CPU issues
+# RESOLUTION = (1640, 1232)  # 2nd best resolution for CAM03 and CAM04 with full FOV (2x2 binning), mode 4
+SENSOR_MODE = 4  # sensor mode corresponding to the resolution 1640x1232
+# OUTPUT_RESOLUTION = (960, 720)
+OUTPUT_RESOLUTION = (416, 320)
+# OUTPUT_RESOLUTION = (416, 320)  # (1640x1232)/4=(410,308)
+                                  # (410,308) is being upscaled to (416,320) by the ISP (warning in bash), but the image will still have (410,308) pixels
+# OUTPUT_RESOLUTION = (820, 616)  # (1640x1232)/2=(820,616)
+                                  # bash: frame size rounded up from 820x616 to 832x624
+
+
+AWB_MODE = 'off'  # auto white balance mode
+# AWB_GAINS = (Fraction(485, 256), Fraction(397, 256))  # white balance gains to have the colours read correctly: (red, blue)
+AWB_GAINS = (1.395, 1.15)  # white balance gains to have the colours read correctly: (red, blue); int, float or Fraction are valid
+BRIGHTNESS = 25  # sets the brightness setting of the camera; default is 50 [0-100]
+                 # the higher this setting, the brighter the LEDs appear and the higher the RGB values, and vice versa!
+CONTRAST = 100  # sets the contrast setting of the camera; the default value is 0 [-100 ... 100]
+
+# ISO = 100  # ISO value
+ISO = 320  # ISO value
+EXPOSURE_MODE = 'off'
+FRAMERATE = 25  # frames per second; 40 fps is the maximum for sensor mode 4
+
+SLEEP_TIME = 2  # time for the camera's sleep mode in seconds; my default: 2 s
+
+# Define functions
+def take_image_picamera_opencv(shutter_speed):
+    folder_exists = False
+    # Initialise the camera
+    with picamera.PiCamera() as camera:
+        with PiRGBArray(camera) as output:
+            # Set camera settings
+            camera.sensor_mode = SENSOR_MODE  # force the camera into the desired sensor mode
+            camera.resolution = OUTPUT_RESOLUTION  # the frame will be resized on the GPU to this resolution, no CPU usage!
+            camera.framerate = FRAMERATE
+
+            camera.awb_mode = AWB_MODE
+            camera.awb_gains = AWB_GAINS
+
+            camera.iso = ISO
+            camera.shutter_speed = shutter_speed
+            # It was found that you have to set the right shutter speed at the first initialisation of the current runtime of the program.
+            # The gains (analog, digital) will adjust to this setup.
+            # After the gains are fixed, they will never change, even if you change the shutter speed during the runtime.
+            # To get consistent brightness values, set the right shutter speed once at initialisation.
+
+            time.sleep(SLEEP_TIME)  # wait for the iso gains, digital_gain and analog_gain to settle before fixing the gains with exposure_mode = 'off'
+            camera.exposure_mode = EXPOSURE_MODE
+
+            time.sleep(1)  # wait before applying brightness and contrast
+            camera.brightness = BRIGHTNESS
+            camera.contrast = CONTRAST
+            time.sleep(SLEEP_TIME)  # camera warm-up time to apply the settings
+
+            # camera.start_preview()  # show the camera preview through the PiCamera interface
+            # camera.annotate_frame_num = True  # controls whether the current frame number is drawn as an annotation
+
+            for frameidx, frame in enumerate(camera.capture_continuous(output, format='bgr', use_video_port=True)):
+                framenumber = frameidx+1  # frameidx starts with 0, framenumber with 1
+                image = frame.array  # raw NumPy array without JPEG encoding
+
+                camera_exposure_speed = camera.exposure_speed  # retrieves the current shutter speed of the camera
+
+                cv.imshow("Current Frame", image)  # display the image without text
+
+                output.truncate(0)  # clear the stream for the next frame
+
+                # The following key handling is only needed if you display the image; no errors if it is commented out, but a higher fps.
+                # break from the loop if q is pressed
+                pressed_key = cv.waitKey(2) & 0xff
+                if pressed_key == ord('q'):
+                    break
+                elif pressed_key == ord('i'):  # take an image if i is pressed
+                    if not folder_exists:
+                        path_saveFolder, folder_exists = create_folder_for_captures()
+
+                    now = datetime.now(); d1 = now.strftime("%Y-%m-%d %H-%M-%S.%f")[:-3]
+                    cv.imwrite(f"{path_saveFolder}/img_ss{shutter_speed}_iso{ISO}_res{OUTPUT_RESOLUTION[0]}x{OUTPUT_RESOLUTION[1]}_Date {d1}.png", image)
+                    print('took image!')
+                elif pressed_key == ord('v'):  # increase the shutter speed by 5
+                    shutter_speed = round(shutter_speed+5)
+                    camera.shutter_speed = shutter_speed
+                    time.sleep(2)  # wait until the shutter speed is applied before querying the exposure speed
+                    exposure_speed = camera.exposure_speed
+                    print(f"shutter speed set to: {shutter_speed}")
+                    print(f"retrieved shutter speed: {exposure_speed}")
+                elif pressed_key == ord('b'):  # increase the shutter speed by 50
+                    shutter_speed = round(shutter_speed+50)
+                    camera.shutter_speed = shutter_speed
+                    print(f"shutter speed set to: {shutter_speed}")
+                    time.sleep(2)  # wait until the shutter speed is applied before querying the exposure speed
+                    exposure_speed = camera.exposure_speed
+                    print(f"retrieved shutter speed: {exposure_speed}")
+                elif pressed_key == ord('n'):  # increase the shutter speed by 500
+                    shutter_speed = round(shutter_speed+500)
+                    camera.shutter_speed = shutter_speed
+                    print(f"shutter speed set to: {shutter_speed}")
+                    time.sleep(2)  # wait until the shutter speed is applied before querying the exposure speed
+                    exposure_speed = camera.exposure_speed
+                    print(f"retrieved shutter speed: {exposure_speed}")
+                elif pressed_key == ord('m'):  # max shutter speed
+                    shutter_speed = round(1/FRAMERATE*1e6)
+                    camera.shutter_speed = shutter_speed
+                    print(f"shutter speed set to: {shutter_speed}")
+                    time.sleep(2)  # wait until the shutter speed is applied before querying the exposure speed
+                    exposure_speed = camera.exposure_speed
+                    print(f"retrieved shutter speed: {exposure_speed}")
+                elif pressed_key == ord('x'):  # decrease the shutter speed by 500
+                    shutter_speed = round(shutter_speed-500)
+                    camera.shutter_speed = shutter_speed
+                    print(f"shutter speed set to: {shutter_speed}")
+                    time.sleep(2)  # wait until the shutter speed is applied before querying the exposure speed
+                    exposure_speed = camera.exposure_speed
+                    print(f"retrieved shutter speed: {exposure_speed}")
+                elif pressed_key == ord('c'):  # decrease the shutter speed by 50
+                    shutter_speed = round(shutter_speed-50)
+                    camera.shutter_speed = shutter_speed
+                    print(f"shutter speed set to: {shutter_speed}")
+                    time.sleep(2)  # wait until the shutter speed is applied before querying the exposure speed
+                    exposure_speed = camera.exposure_speed
+                    print(f"retrieved shutter speed: {exposure_speed}")
+                elif pressed_key == ord('o'):  # set the shutter speed to 0
+                    shutter_speed = 0
+                    camera.shutter_speed = shutter_speed
+                    print(f"shutter speed set to: {shutter_speed}")
+                    time.sleep(2)  # wait until the shutter speed is applied before querying the exposure speed
+                    exposure_speed = camera.exposure_speed
+                    print(f"retrieved shutter speed: {exposure_speed}")
+
+
+def create_folder_for_captures():
+    # Create a folder for saving the captured pictures
+    now = datetime.now(); d1 = now.strftime("%Y-%m-%d %H-%M")
+    path_cwd = os.getcwd()
+
+    path_saveFolder = path_cwd+r"/Capture_"+d1
+    try:
+        os.mkdir(path_saveFolder)
+        folder_exists = True
+    except OSError:
+        print("Error! Ending script.")
+        quit()
+
+    return path_saveFolder, folder_exists
+
+
+# Start script
+
+# start the capture series for different shutter speeds
+print('start capture...')
+
+take_image_picamera_opencv(shutter_speed=50)
+# take_image_picamera_opencv(shutter_speed=round(1/FRAMERATE*1e6))  # max shutter speed depends on the fps: 1/fps (e.g. at 2 fps: 500 000 µs)
+
+# End script
+
+cv.destroyAllWindows()
+
+print('Script finished')
+
diff --git a/02_Kameraaufnahme/Rest/mp_raspividyuv_fast_capture_cannyedge.py b/02_Kameraaufnahme/Rest/mp_raspividyuv_fast_capture_cannyedge.py
new file mode 100644
index 0000000..b08b57d
--- /dev/null
+++ b/02_Kameraaufnahme/Rest/mp_raspividyuv_fast_capture_cannyedge.py
@@ -0,0 +1,112 @@
+# This script uses a raspividyuv subprocess to capture frames @35 fps with Canny edge detection
+# To-Do:
+# -set parameters for the camera
+# -fix typos and uniformity
+# -comment things better and add more steps for easier understanding for non-Python people (beginner proficiency level)
+# -capture LED strips and save video
+# -How about the OV sensor? Putting the camera in sensor mode 4 and requesting an image at a lower resolution drops the fps.
+# -Is the flashing also present with this script?
+
+import cv2 as cv
+import numpy as np
+import subprocess as sp
+import time
+import atexit
+
+frames = []  # stores the video sequence for the demo
+max_frames = 500
+
+N_frames = 0
+
+# Video capture parameters
+(w, h) = (416, 320)  # width must be a multiple of 32 and height a multiple of 16 (raspivid pads the frame to these)
+colour_channels = 3
+bytesPerFrame = w * h * colour_channels
+fps = 35  # setting this to 250 will request the maximum framerate possible
+
+sensor_mode = 4
+
+# "raspividyuv" is the command that provides camera frames in YUV format
+#  "--output -" specifies stdout as the output
+#  "--timeout 0" specifies continuous video
+#  "--luma" would discard the chroma channels, so that only luminance is sent through the pipeline
+# see "raspividyuv --help" for more information on the parameters
+# videoCmd = "raspividyuv -w "+str(w)+" -h "+str(h)+" --output - --timeout 0 --framerate "+str(fps)+" --rgb --nopreview --mode "+str(sensor_mode)
+videoCmd = f"raspividyuv -w {w} -h {h} --output - --timeout 0 --framerate {fps} --rgb --nopreview --mode {sensor_mode}"
+    # with the sensor mode set: 38 fps instead of 72 fps --> how about the camera's OV sensor?
+# videoCmd = "raspividyuv -w "+str(w)+" -h "+str(h)+" --output - --timeout 0 --framerate "+str(fps)+" --rgb --nopreview"
+videoCmd = videoCmd.split()  # Popen requires that each parameter is a separate string
+
+cameraProcess = sp.Popen(videoCmd, stdout=sp.PIPE)  # start the camera
+atexit.register(cameraProcess.terminate)  # this closes the camera process in case the python script exits unexpectedly
+
+# wait for the first frame and discard it (only done to measure the time more accurately)
+rawStream = cameraProcess.stdout.read(bytesPerFrame)
+
+print("Start...")
+
+start_time = time.time()
+
+while True:
+    cameraProcess.stdout.flush()  # discard any frames that we were not able to process in time
+
+    frame = np.frombuffer(cameraProcess.stdout.read(bytesPerFrame), dtype=np.uint8)  # raw NumPy array without JPEG encoding
+
+    if frame.size != bytesPerFrame:
+        print("Error: Camera stream closed unexpectedly")
+        break
+    frame.shape = (h, w, colour_channels)  # set the dimensions of the numpy array --> from (399360,) to (320, 416, 3) for w=416, h=320
+
+    # do the processing here with OpenCV
+
+    frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)  # swap the R and B channels of the frame
+
+    # test
+    frame_gs = cv.cvtColor(frame, cv.COLOR_RGB2GRAY)  # convert to grayscale
+    frame = cv.Canny(frame_gs, 50, 150)  # Canny edge detection
+
+    frames.append(frame)  # save the frame (for the demo)
+
+    N_frames += 1
+
+    # test: put text on the frame
+    font = cv.FONT_HERSHEY_SIMPLEX  # font
+    fontScale = 1  # font scale
+    color = (255, 255, 255)  # font colour in BGR
+    thickness = 1  # line thickness in px
+
+    # set the text position
+    frame_width = int(frame.shape[1])
+    frame_height = int(frame.shape[0])
+    text_start_position_Y = int(round(frame_height*0.12))  # start position of the text in pixels, 12 % of the frame height
+    text_linespacing = 50  # line spacing between two strings in pixels
+    # text_start_position_X = int(frame_width/4)  # start the text from 1/4 of the image width
+    text_start_position_X = int(0)  # start the text from the left edge of the image
+    pos_1 = (text_start_position_X, text_start_position_Y)
+    text_line_1 = f"Frame: {N_frames}"
+    cv.putText(frame, text_line_1, pos_1, font, fontScale, color, thickness, cv.LINE_AA)
+
+    # if N_frames > max_frames: break  # if cv.imshow is deactivated, the end of the program can be controlled with this parameter
+
+    cv.imshow("Current Frame", frame)  # display the image
+    pressed_key = cv.waitKey(1) & 0xff
+    if pressed_key == ord('q'):
+        break
+
+
+cv.destroyAllWindows()
+
+end_time = time.time()
+
+cameraProcess.terminate()  # stop the camera
+
+elapsed_seconds = end_time - start_time
+print(f"Finish! Result: {(N_frames/elapsed_seconds)} fps")
+
diff --git a/02_Kameraaufnahme/Rest/mp_raspividyuv_fast_rgb.py b/02_Kameraaufnahme/Rest/mp_raspividyuv_fast_rgb.py
new file mode 100644
index 0000000..9ce6173
--- /dev/null
+++ b/02_Kameraaufnahme/Rest/mp_raspividyuv_fast_rgb.py
@@ -0,0 +1,120 @@
+# This script uses a raspividyuv subprocess to capture frames @30 fps and display the separate r, g, b channels
+# To-Do:
+# -set parameters for the camera
+# -fix typos and uniformity
+# -comment things better and add more steps for easier understanding for non-Python people (beginner proficiency level)
+# -capture LED strips and save video
+# -How about the OV sensor? Putting the camera in sensor mode 4 and requesting an image at a lower resolution drops the fps.
+# -Is the flashing also present with this script?
+
+import cv2 as cv
+import numpy as np
+import subprocess as sp
+import time
+import atexit
+
+from fractions import Fraction
+
+max_frames = 500
+
+N_frames = 0
+
+# Video capture parameters
+(w, h) = (416, 320)  # width must be a multiple of 32 and height a multiple of 16 (raspivid pads the frame to these)
+colour_channels = 3
+bytesPerFrame = w * h * colour_channels
+fps = 30  # setting this to 250 will request the maximum framerate possible
+
+sensor_mode = 4
+
+
+AWB_MODE = 'off'  # auto white balance mode
+AWB_GAINS = f"{485/256},{397/256}"  # white balance gains to have the colours read correctly: (red, blue)
+ISO = 100  # ISO value
+EXPOSURE_MODE = 'off'
+
+shutterspeed = 20000
+
+# "raspividyuv" is the command that provides camera frames in YUV format
+#  "--output -" specifies stdout as the output
+#  "--timeout 0" specifies continuous video
+#  "--luma" would discard the chroma channels, so that only luminance is sent through the pipeline
+# see "raspividyuv --help" for more information on the parameters
+# videoCmd = "raspividyuv -w "+str(w)+" -h "+str(h)+" --output - --timeout 0 --framerate "+str(fps)+" --rgb --nopreview --mode "+str(sensor_mode)
+videoCmd = (f"raspividyuv -w {w} -h {h} --output - --timeout 0 --framerate {fps} --rgb --nopreview "
+            f"--mode {sensor_mode} --awb {AWB_MODE} -awbg {AWB_GAINS} --ISO {ISO} --exposure {EXPOSURE_MODE} -ss {shutterspeed}")
+    # with the sensor mode set: 38 fps instead of 72 fps --> how about the camera's OV sensor?
+# videoCmd = "raspividyuv -w "+str(w)+" -h "+str(h)+" --output - --timeout 0 --framerate "+str(fps)+" --rgb --nopreview"
+videoCmd = videoCmd.split()  # Popen requires that each parameter is a separate string
+
+cameraProcess = sp.Popen(videoCmd, stdout=sp.PIPE)  # start the camera
+atexit.register(cameraProcess.terminate)  # this closes the camera process in case the python script exits unexpectedly
+
+# wait for the first frame and discard it (only done to measure the time more accurately)
+rawStream = cameraProcess.stdout.read(bytesPerFrame)
+
+print("Start...")
+
+start_time = time.time()
+
+while True:
+    cameraProcess.stdout.flush()  # discard any frames that we were not able to process in time
+
+    frame = np.frombuffer(cameraProcess.stdout.read(bytesPerFrame), dtype=np.uint8)  # raw NumPy array without JPEG encoding
+
+    if frame.size != bytesPerFrame:
+        print("Error: Camera stream closed unexpectedly")
+        break
+    frame.shape = (h, w, colour_channels)  # set the dimensions of the numpy array --> from (399360,) to (320, 416, 3) for w=416, h=320
+
+    # do the processing here with OpenCV
+
+    r, g, b = cv.split(frame)  # the frame was requested with --rgb, so splitting should yield r, g, b in this order; OpenCV itself works with BGR
+
+    frame = cv.cvtColor(frame, cv.COLOR_BGR2RGB)  # swap the R and B channels for display
+
+    N_frames += 1
+
+    # put text on the frame
+    font = cv.FONT_HERSHEY_SIMPLEX  # font
+    fontScale = 1  # font scale
+    color = (255, 255, 255)  # font colour in BGR
+    thickness = 1  # line thickness in px
+
+    # set the text position
+    frame_width = int(frame.shape[1])
+    frame_height = int(frame.shape[0])
+    text_start_position_Y = int(round(frame_height*0.12))  # start position of the text in pixels, 12 % of the frame height
+    text_linespacing = 50  # line spacing between two strings in pixels
+    # text_start_position_X = int(frame_width/4)  # start the text from 1/4 of the image width
+    text_start_position_X = int(0)  # start the text from the left edge of the image
+    pos_1 = (text_start_position_X, text_start_position_Y)
+    text_line_1 = f"Frame: {N_frames}"
+    cv.putText(frame, text_line_1, pos_1, font, fontScale, color, thickness, cv.LINE_AA)
+
+    # if N_frames > max_frames: break  # if cv.imshow is deactivated, the end of the program can be controlled with this parameter
+
+    cv.imshow("Current Frame", frame)  # display the image
+    cv.imshow("b", b)
+    cv.imshow("g", g)
+    cv.imshow("r", r)
+    pressed_key = cv.waitKey(1) & 0xff
+    if pressed_key == ord('q'):
+        break
+
+
+cv.destroyAllWindows()
+
+end_time = time.time()
+
+cameraProcess.terminate()  # stop the camera
+
+elapsed_seconds = end_time - start_time
+print(f"Finish! Result: {(N_frames/elapsed_seconds)} fps")
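+
+# Note on the "frame size rounded up" warnings mentioned in these scripts: as I
+# understand raspivid/raspividyuv, the requested frame is padded to a width that
+# is a multiple of 32 and a height that is a multiple of 16. A small sketch (not
+# part of the original script) to compute the bytes actually delivered per frame:
+def padded_frame_bytes(width, height, channels=3):
+    width_padded = (width + 31) // 32 * 32    # width rounded up to a multiple of 32
+    height_padded = (height + 15) // 16 * 16  # height rounded up to a multiple of 16
+    return width_padded * height_padded * channels
+
+assert padded_frame_bytes(416, 320) == 416 * 320 * 3  # no padding needed at this size
+assert padded_frame_bytes(820, 616) == 832 * 624 * 3  # matches the 820x616 -> 832x624 rounding reported in bash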
diff --git a/02_Kameraaufnahme/mp_Vorbereitung/PiCameraVideoPort_mp.pdf b/02_Kameraaufnahme/mp_Vorbereitung/PiCameraVideoPort_mp.pdf
new file mode 100644
index 0000000..31b0bb6
--- /dev/null
+++ b/02_Kameraaufnahme/mp_Vorbereitung/PiCameraVideoPort_mp.pdf
[Binary PDF: a flowchart titled "mp_vorbereitung V06_04"; the raw page stream is omitted here. Recoverable text of the flowchart:

Main: Start -> create shared memory for: bools, framenumber (uint64), red_frame, green frame, blue frame -> create 3 processes: one for each colour channel -> start activity of the created processes -> get frames @25 fps from Picamera as raw NumPy array without JPEG encoding (always the newest frame is received: processing must be faster than the fps if every frame should be processed) -> increase framenumber: shm_bools[0] = True (newframe), shm_framenumber[0] = framenumber -> split the frame into b, g, r components and allocate them to shared memory -> set the trigger to "True" for each colour channel for the start of the processing: shm_bools[7,8,9] = True.

Multiprocessing: processing_red | processing_green | processing_blue (green and blue are the same as processing_red).

processing_red: read the framenumber shm_framenumber[0]. Conditions for first start: i = 0, shm_bools[0] = True (newframe), shm_bools[1,2,3] = False (p_rgb_finished), shm_bools[7] = True (p_red_start_trigger). Conditions for start processing: framenumber > last_processed_frame, shm_bools[7] = True (p_red_start_trigger). If "conditions for first start" = True (yes) or "conditions for start processing" = True (yes): set shm_bools[4] = True (p_red_started), shm_bools[7] = False (p_red_start_trigger, reset trigger), shm_bools[1] = False (p_red_finished); do processing; i += 1; set shm_bools[1] = True (p_red_finished); last_processed_frame = framenumber; shm_bools[4] = False (p_red_started).]
854.93333 139.19791 Tm +(28.02.2023) Tj +ET +Q +Q +Q +Q +Q +Q +Q +Q +Q +endstream +endobj +1 0 obj +<> +endobj +5 0 obj +<< +/Type /Font +/BaseFont /Helvetica +/Subtype /Type1 +/Encoding /WinAnsiEncoding +/FirstChar 32 +/LastChar 255 +>> +endobj +6 0 obj +<< +/Type /Font +/BaseFont /Helvetica-Bold +/Subtype /Type1 +/Encoding /WinAnsiEncoding +/FirstChar 32 +/LastChar 255 +>> +endobj +7 0 obj +<< +/Type /Font +/BaseFont /Helvetica-Oblique +/Subtype /Type1 +/Encoding /WinAnsiEncoding +/FirstChar 32 +/LastChar 255 +>> +endobj +8 0 obj +<< +/Type /Font +/BaseFont /Helvetica-BoldOblique +/Subtype /Type1 +/Encoding /WinAnsiEncoding +/FirstChar 32 +/LastChar 255 +>> +endobj +9 0 obj +<< +/Type /Font +/BaseFont /Courier +/Subtype /Type1 +/Encoding /WinAnsiEncoding +/FirstChar 32 +/LastChar 255 +>> +endobj +10 0 obj +<< +/Type /Font +/BaseFont /Courier-Bold +/Subtype /Type1 +/Encoding /WinAnsiEncoding +/FirstChar 32 +/LastChar 255 +>> +endobj +11 0 obj +<< +/Type /Font +/BaseFont /Courier-Oblique +/Subtype /Type1 +/Encoding /WinAnsiEncoding +/FirstChar 32 +/LastChar 255 +>> +endobj +12 0 obj +<< +/Type /Font +/BaseFont /Courier-BoldOblique +/Subtype /Type1 +/Encoding /WinAnsiEncoding +/FirstChar 32 +/LastChar 255 +>> +endobj +13 0 obj +<< +/Type /Font +/BaseFont /Times-Roman +/Subtype /Type1 +/Encoding /WinAnsiEncoding +/FirstChar 32 +/LastChar 255 +>> +endobj +14 0 obj +<< +/Type /Font +/BaseFont /Times-Bold +/Subtype /Type1 +/Encoding /WinAnsiEncoding +/FirstChar 32 +/LastChar 255 +>> +endobj +15 0 obj +<< +/Type /Font +/BaseFont /Times-Italic +/Subtype /Type1 +/Encoding /WinAnsiEncoding +/FirstChar 32 +/LastChar 255 +>> +endobj +16 0 obj +<< +/Type /Font +/BaseFont /Times-BoldItalic +/Subtype /Type1 +/Encoding /WinAnsiEncoding +/FirstChar 32 +/LastChar 255 +>> +endobj +17 0 obj +<< +/Type /Font +/BaseFont /ZapfDingbats +/Subtype /Type1 +/FirstChar 32 +/LastChar 255 +>> +endobj +18 0 obj +<< +/Type /Font +/BaseFont /Symbol +/Subtype /Type1 +/FirstChar 32 +/LastChar 255 +>> +endobj +19 0 obj +<< +/ca 1. +/CA 0. +>> +endobj +20 0 obj +<< +/ca 1. +/CA 1. +>> +endobj +21 0 obj +<< +/ca 0.04 +>> +endobj +22 0 obj +<< +/ca 0. +>> +endobj +23 0 obj +<< +/Type /XObject +/Subtype /Form +/BBox [-2.5 -10. 7.5 10.] +/Matrix [1. 0. 0. 1. 0. 0.] +/Length 77 +>> +stream +q +/GS2 gs +0. g +0. G +10. M +1. 0. 0. 1. 0. 0. cm +5. -5. m +0. 0. l +5. 5. 
l +h +B +Q +endstream +endobj +2 0 obj +<< +/ProcSet [/PDF /Text /ImageB /ImageC /ImageI] +/Font << +/F1 5 0 R +/F2 6 0 R +/F3 7 0 R +/F4 8 0 R +/F5 9 0 R +/F6 10 0 R +/F7 11 0 R +/F8 12 0 R +/F9 13 0 R +/F10 14 0 R +/F11 15 0 R +/F12 16 0 R +/F13 17 0 R +/F14 18 0 R +>> +/ExtGState << +/GS1 19 0 R +/GS2 20 0 R +/GS3 21 0 R +/GS4 22 0 R +>> +/XObject << +/Xo1 23 0 R +>> +>> +endobj +24 0 obj +<< +/Producer (jsPDF 2.3.1) +/CreationDate (D:20220228182020+01'00') +>> +endobj +25 0 obj +<< +/Type /Catalog +/Pages 1 0 R +/OpenAction [3 0 R /FitH null] +/PageLayout /OneColumn +>> +endobj +xref +0 26 +0000000000 65535 f +0000018736 00000 n +0000020897 00000 n +0000000015 00000 n +0000000122 00000 n +0000018793 00000 n +0000018918 00000 n +0000019048 00000 n +0000019181 00000 n +0000019318 00000 n +0000019441 00000 n +0000019570 00000 n +0000019702 00000 n +0000019838 00000 n +0000019966 00000 n +0000020093 00000 n +0000020222 00000 n +0000020355 00000 n +0000020457 00000 n +0000020553 00000 n +0000020589 00000 n +0000020625 00000 n +0000020656 00000 n +0000020685 00000 n +0000021222 00000 n +0000021308 00000 n +trailer +<< +/Size 26 +/Root 25 0 R +/Info 24 0 R +/ID [ ] +>> +startxref +21412 +%%EOF +3 0 obj <>/Font<>/ProcSet[/PDF/Text]/XObject<>>>/Type/Page>> endobj 23 0 obj <>>>/Subtype/Form/Type/XObject>>stream +0 TL +q +0 g +0 G +10 M +/GS0 gs +q 1 0 0 1 5 -5 cm +0 0 m +-5 5 l +0 10 l +h +B +Q +Q + +endstream endobj 24 0 obj <> endobj 25 0 obj <> endobj 26 0 obj <> endobj 27 0 obj <>stream + + + + + 2022-02-28T18:20:20+01:00 + 2022-03-08T08:18:51+01:00 + 2022-03-08T08:18:51+01:00 + jsPDF 2.3.1 + application/pdf + uuid:3224bb93-0008-4556-8c5d-c39429a4ac86 + uuid:7d80532f-629c-4714-9ee4-e23becec9431 + + + + + + + + + + + + + + + + + + + + + + + + + +endstream endobj 28 0 obj <>stream +q +1 0 0 -1 0 1860 cm +0 0 2483 1860 re +W n +1 g +0 0 m +2483 0 l +2483 1860 l +0 1860 l +h +f +1 0 0 1 502.5 1259.53125 cm +1.5 w +q 1 0 0 1 -150 -840.9375 cm +0 0 m +0 25.875 l +S +Q +q +0 -1 1 0 -150 -810.76961 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +q 1 0 0 1 -150 -943.4687 cm +0 0 m +0 38.406 l +S +Q +q +0 -1 1 0 -150 -900.76961 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +q 1 0 0 1 -150 -1102.5 cm +0 0 m +0 33.969 l +S +Q +q +0 -1 1 0 -150 -1064.23836 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +q 1 0 0 1 -150 -750.9375 cm +0 0 m +0 25.875 l +S +Q +q +0 -1 1 0 -150 -720.76961 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +q 1 0 0 1 -150 -660.9375 cm +0 0 m +0 25.406 l +S +Q +q +0 -1 1 0 -150 -631.23836 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +q 1 0 0 1 -1.25 -570 cm +0 0 m +56.5 0 l +S +Q +q +-1 0 0 -1 59.54289 -570 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +q 1 0 0 1 -150 -510.4687 cm +0 0 m +0 25.406 l +S +Q +q +0 -1 1 0 -150 -480.76961 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +q 1 0 0 1 -150 -240.9375 cm +0 0 m +0 50.938 l +0 57.604 -3.333 60.938 -10 60.938 c +-200 60.938 l +-206.667 60.938 -210 57.604 -210 50.938 c +-210 -319.062 l +-210 -325.729 -206.667 -329.062 -200 -329.062 c +-154.75 -329.062 l +S +Q +q +-1 0 0 -1 -300.45711 -570 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +q 1 0 0 1 -1.25 -450 cm +0 0 m +56.5 0 l +S +Q +q +-1 0 0 -1 59.54289 -450 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +q 1 0 0 1 -150 -420.9375 cm +0 0 m +0 25.875 l +S +Q +q +0 -1 1 0 -150 -390.76961 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +q 1 0 0 1 -150 -330.9375 cm +0 0 m +0 25.875 l +S +Q +q +0 -1 1 0 -150 -300.76961 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +q 1 0 0 1 -1.25 -270 cm +0 0 m +56.5 0 l +S +Q +q +-1 0 0 -1 59.54289 -270 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +q 1 0 0 1 810 -990.9375 cm +0 0 m +0 115.875 l 
+S +Q +q +0 -1 1 0 810 -870.76961 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +q 1 0 0 1 1410 -990.9375 cm +0 0 m +0 25.875 l +S +Q +q +0 -1 1 0 1410 -960.76961 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +q 1 0 0 1 1800 -990.9375 cm +0 0 m +0 25.875 l +S +Q +q +0 -1 1 0 1800 -960.76961 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +q 1 0 0 1 810 -810.9375 cm +0 0 m +0 25.875 l +S +Q +q +0 -1 1 0 810 -780.76961 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +q 1 0 0 1 661.25 -120 cm +0 0 m +-51.25 0 l +-57.917 0 -61.25 -3.333 -61.25 -10 c +-61.25 -710 l +-61.25 -716.667 -57.917 -720 -51.25 -720 c +-6 -720 l +S +Q +q +-1 0 0 -1 659.54289 -840 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +q 1 0 0 1 661.25 540 cm +0 0 m +-81.25 0 l +-87.917 0 -91.25 -3.333 -91.25 -10 c +-91.25 -1370 l +-91.25 -1376.667 -87.917 -1380 -81.25 -1380 c +-6 -1380 l +S +Q +q +-1 0 0 -1 659.54289 -840 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +q 1 0 0 1 660 30 cm +0 0 m +-80 0 l +-86.667 0 -90 -3.333 -90 -10 c +-90 -860 l +-90 -866.667 -86.667 -870 -80 -870 c +-4.75 -870 l +S +Q +q +-1 0 0 -1 659.54289 -840 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +1 0 0 -1 -502.5 600.46875 cm +BT +0 g +/T1_0 12 Tf +1 -0 0 1 1110.8333 578.4687 Tm +(no)Tj +ET +q +1 0 0 -1 502.5 600.46875 cm +q 1 0 0 1 810 -660 cm +0 0 m +0 24.469 l +S +Q +Q +q +0 1 1 0 1312.5 1231.70711 cm +1 w +/GS0 gs +/Fm0 Do +Q +q +1 0 0 -1 502.5 600.46875 cm +q 1 0 0 1 810 -510.4687 cm +0 0 m +0 24.719 l +S +Q +Q +q +0 1 1 0 1312.5 1081.92586 cm +1 w +/GS0 gs +/Fm0 Do +Q +q +1 0 0 -1 502.5 600.46875 cm +q 1 0 0 1 810 -420 cm +0 0 m +0 24.937 l +S +Q +Q +q +0 1 1 0 1312.5 991.23836 cm +1 w +/GS0 gs +/Fm0 Do +Q +BT +1279.417 1003.305 Td +(yes)Tj +ET +q +1 0 0 -1 502.5 600.46875 cm +q 1 0 0 1 960 -449.875 cm +0 0 m +50 0 l +56.667 0 60 3.333 60 10 c +60 409.875 l +60 416.542 56.667 419.875 50 419.875 c +-140 419.875 l +-146.667 419.875 -150 423.208 -150 429.875 c +-150 444 l +S +Q +Q +q +0 1 1 0 1312.5 602.05086 cm +1 w +/GS0 gs +/Fm0 Do +Q +BT +1485.833 1034.344 Td +(no)Tj +ET +q +1 0 0 -1 502.5 600.46875 cm +q 1 0 0 1 810 -270 cm +0 0 m +0 24.937 l +S +Q +Q +q +0 1 1 0 1312.5 841.23836 cm +1 w +/GS0 gs +/Fm0 Do +Q +q +1 0 0 -1 502.5 600.46875 cm +q 1 0 0 1 810 -180.9375 cm +0 0 m +0 25.875 l +S +Q +Q +q +0 1 1 0 1312.5 751.23836 cm +1 w +/GS0 gs +/Fm0 Do +Q +q +1 0 0 -1 502.5 600.46875 cm +q 1 0 0 1 810 59.875 cm +0 0 m +0 25.062 l +S +Q +Q +q +0 1 1 0 1312.5 511.23836 cm +1 w +/GS0 gs +/Fm0 Do +Q +BT +1 -0 0 1 1288.1667 518.2482 Tm +(yes)Tj +ET +q +1 0 0 -1 502.5 600.46875 cm +q 1 0 0 1 810 210 cm +0 0 m +0 24.937 l +S +Q +Q +q +0 1 1 0 1312.5 361.23836 cm +1 w +/GS0 gs +/Fm0 Do +Q +q +1 0 0 -1 502.5 600.46875 cm +q 1 0 0 1 810 299.0625 cm +0 0 m +0 25.875 l +S +Q +Q +q +0 1 1 0 1312.5 271.23836 cm +1 w +/GS0 gs +/Fm0 Do +Q +q +1 0 0 -1 502.5 600.46875 cm +q 1 0 0 1 810 390 cm +0 0 m +0 24.469 l +S +Q +Q +q +0 1 1 0 1312.5 181.70711 cm +1 w +/GS0 gs +/Fm0 Do +Q +q +1 0 0 -1 502.5 600.46875 cm +q 1 0 0 1 810 479.5312 cm +0 0 m +0 24.937 l +S +Q +Q +q +0 1 1 0 1312.5 91.70711 cm +1 w +/GS0 gs +/Fm0 Do +Q +q +1 0 0 -1 502.5 600.46875 cm +q 1 0 0 1 1410 -900.9375 cm +0 0 m +0 25.875 l +S +Q +Q +q +0 1 1 0 1912.5 1471.23836 cm +1 w +/GS0 gs +/Fm0 Do +Q +q +1 0 0 -1 502.5 600.46875 cm +q 1 0 0 1 1800 -900.9375 cm +0 0 m +0 25.875 l +S +Q +Q +q +0 1 1 0 2302.5 1471.23836 cm +1 w +/GS0 gs +/Fm0 Do +Q +q +1 0 0 -1 502.5 600.46875 cm +0.82 0.87 0.93 rg +1 w +q 1 0 0 1 -298.75 -899.0625 cm +0 0 m +297.5 0 l +297.5 58.125 l +0 58.125 l +h +B +Q +Q +BT +339.5 1466.394 Td +(Main)Tj +ET +q +1 0 0 -1 502.5 600.46875 cm +1 0.64 0.64 rg +1 w +q 1 0 0 
1 -298.75 -1062.5313 cm +0 0 m +297.5 0 l +297.5 119.063 l +0 119.063 l +h +B +Q +Q +BT +282.167 1644.394 Td +(create shared memory for:)Tj +52.333 -18 Td +[(-)-278 (bools)]TJ +-43.333 -18 Td +[(-)-278 (framenumber \(uint64\))]TJ +30.333 -18 Td +[(-)-278 (red_frame)]TJ +-5 -18 Td +[(-)-278 (green frame)]TJ +4 -18 Td +[(-)-278 (blue frame)]TJ +ET +q +1 0 0 -1 502.5 600.46875 cm +1 0.64 0.64 rg +1 w +q 1 0 0 1 -298.75 -809.0625 cm +0 0 m +297.5 0 l +297.5 58.125 l +0 58.125 l +h +B +Q +Q +BT +300.167 1385.394 Td +(create 3 processes:)Tj +-18.333 -18 Td +(one for each color channel)Tj +ET +q +1 0 0 -1 502.5 600.46875 cm +1 0.64 0.64 rg +1 w +q 1 0 0 1 -298.75 -719.0625 cm +0 0 m +297.5 0 l +297.5 58.125 l +0 58.125 l +h +B +Q +Q +BT +263.5 1286.394 Td +(start activity of created processes)Tj +ET +q +1 0 0 -1 502.5 600.46875 cm +0.82 0.87 0.93 rg +1 w +q 1 0 0 1 -298.75 -629.5312 cm +0 0 m +297.5 0 l +297.5 119.063 l +0 119.063 l +h +B +Q +Q +BT +259.408 1184.394 Td +(get frames @25 fps from Picamera)Tj +38.758 -18 Td +(as raw NumPy array)Tj +-8 -18 Td +(without JPEG encoding)Tj +ET +q +1 0 0 -1 502.5 600.46875 cm +0.82 0.87 0.93 rg +1 w +q 1 0 0 1 61.25 -629.5312 cm +0 0 m +297.5 0 l +297.5 119.063 l +0 119.063 l +h +B +Q +Q +BT +614.5 1184.394 Td +(always the newest frame is recieved:)Tj +6.333 -18 Td +(processing must be faster than fps)Tj +-1.667 -18 Td +(if every frame should be processed)Tj +ET +q +1 0 0 -1 502.5 600.46875 cm +1 0.64 0.64 rg +1 w +q 1 0 0 1 -298.75 -479.0625 cm +0 0 m +297.5 0 l +297.5 58.125 l +0 58.125 l +h +B +Q +Q +BT +292.417 1046.394 Td +(increase framenumber)Tj +ET +q +1 0 0 -1 502.5 600.46875 cm +1 0.64 0.64 rg +1 w +q 1 0 0 1 61.25 -479.0625 cm +0 0 m +297.5 0 l +297.5 58.125 l +0 58.125 l +h +B +Q +Q +BT +625.892 1055.394 Td +(shm_bools[0] = True \(newframe\))Tj +-12.975 -18 Td +(shm_framenumber[0] = framenumber)Tj +ET +q +1 0 0 -1 502.5 600.46875 cm +0.82 0.87 0.93 rg +1 w +q 1 0 0 1 -298.75 -389.0625 cm +0 0 m +297.5 0 l +297.5 58.125 l +0 58.125 l +h +B +Q +Q +BT +256.833 965.394 Td +(split frame into: b, g, r - components)Tj +13.333 -18 Td +(and allocate to shared memory)Tj +ET +q +1 0 0 -1 502.5 600.46875 cm +1 0.64 0.64 rg +1 w +q 1 0 0 1 -298.75 -299.0625 cm +0 0 m +297.5 0 l +297.5 58.125 l +0 58.125 l +h +B +Q +Q +BT +242.125 875.394 Td +(set trigger to "True" for each colorchannel)Tj +42.708 -18 Td +(for start of the processing)Tj +ET +q +1 0 0 -1 502.5 600.46875 cm +1 0.64 0.64 rg +1 w +q 1 0 0 1 61.25 -299.0625 cm +0 0 m +297.5 0 l +297.5 58.125 l +0 58.125 l +h +B +Q +Q +BT +647.892 866.394 Td +(shm_bools[7,8,9] = True)Tj +ET +q +1 0 0 -1 502.5 600.46875 cm +0.51 0.95 1 rg +1 w +q 1 0 0 1 661.25 -1049.0625 cm +0 0 m +1288.75 0 l +1288.75 58.125 l +0 58.125 l +h +B +Q +Q +BT +1766.458 1616.394 Td +(Multiprocessing)Tj +ET +q +1 0 0 -1 502.5 600.46875 cm +1 0.64 0.64 rg +1 w +q 1 0 0 1 661.25 -869.0625 cm +0 0 m +297.5 0 l +297.5 58.125 l +0 58.125 l +h +B +Q +Q +BT +1263.083 1445.394 Td +(read framenumber)Tj +-7.583 -18 Td +(shm_framenumber[0])Tj +ET +q +1 0 0 -1 502.5 600.46875 cm +1 0.64 0.64 rg +1 w +q 1 0 0 1 661.25 -779.0625 cm +0 0 m +297.5 0 l +297.5 119.063 l +0 119.063 l +h +B +Q +Q +BT +1250.5 1351.925 Td +(conditions for first start:)Tj +46.833 -18 Td +[(-)-278 (i = 0)]TJ +-73.442 -18 Td +[(-)-278 (shm_bools[0]= True \(newframe\))]TJ +-27.225 -18 Td +[(-)-278 (shm_bools[1,2,3] = False \(p_rgb_finished\))]TJ +-3 -18 Td +[(-)-278 (shm_bools[7] = TRUE \(p_red_start_trigger\))]TJ +ET +q +1 0 0 -1 502.5 600.46875 cm +1 0.64 0.64 rg +1 w +q 1 0 
0 1 661.25 -629.5312 cm +0 0 m +297.5 0 l +297.5 119.063 l +0 119.063 l +h +B +Q +Q +BT +1231.167 1184.394 Td +(conditions for start processing:)Tj +-23.833 -18 Td +[(-)-278 (framenumber > last_processed_frame)]TJ +-13.667 -18 Td +[(-)-278 (shm_bools[7] = TRUE \(p_red_start_trigger\))]TJ +ET +q +1 0 0 -1 502.5 600.46875 cm +0.82 0.87 0.93 rg +1 w +q 1 0 0 1 810 -479.75 cm +0 0 m +150 29.875 l +0 59.75 l +-150 29.875 l +h +B +Q +Q +BT +1233.225 1046.269 Td +(conditions for first start = True)Tj +ET +q +1 0 0 -1 502.5 600.46875 cm +1 0.64 0.64 rg +1 w +q 1 0 0 1 630 -389.0625 cm +0 0 m +360 0 l +360 119.063 l +0 119.063 l +h +B +Q +Q +BT +1302.833 952.925 Td +(set:)Tj +-91.275 -18 Td +[(-)-278 (shm_bools[4] = True \(p_red_started\))]TJ +-53.2 -18 Td +[(-)-278 (shm_bools[7] = False \(p_red_start_trigger\) | reset trigger)]TJ +48.308 -18 Td +[(-)-278 (shm_bools[1] = False \(p_red_finished\))]TJ +ET +q +1 0 0 -1 502.5 600.46875 cm +0.82 0.87 0.93 rg +1 w +q 1 0 0 1 661.25 -239.0625 cm +0 0 m +297.5 0 l +297.5 58.125 l +0 58.125 l +h +B +Q +Q +BT +1275.167 806.394 Td +(do processing)Tj +ET +q +1 0 0 -1 502.5 600.46875 cm +1 0.64 0.64 rg +1 w +q 1 0 0 1 661.25 -149.0625 cm +0 0 m +297.5 0 l +297.5 89.063 l +0 89.063 l +h +B +Q +Q +BT +1297.5 727.925 Td +(i += 1)Tj +3.667 -18 Td +(set: )Tj +-91.942 -18 Td +[(-)-278 (shm_bools[1] = True \(p_red_finished\))]TJ +-0.225 -18 Td +[(-)-278 (shm_bools[4] = False \(p_red_started\))]TJ +ET +q +1 0 0 -1 502.5 600.46875 cm +0.82 0.87 0.93 rg +1 w +q 1 0 0 1 810 0.125 cm +0 0 m +150 29.875 l +0 59.75 l +-150 29.875 l +h +B +Q +Q +BT +1215.667 566.394 Td +(conditions for start processing = true)Tj +ET +q +1 0 0 -1 502.5 600.46875 cm +1 0.64 0.64 rg +1 w +q 1 0 0 1 630 90.9375 cm +0 0 m +360 0 l +360 119.063 l +0 119.063 l +h +B +Q +Q +BT +1302.833 472.925 Td +(set:)Tj +-91.275 -18 Td +[(-)-278 (shm_bools[4] = True \(p_red_started\))]TJ +-53.2 -18 Td +[(-)-278 (shm_bools[7] = False \(p_red_start_trigger\) | reset trigger)]TJ +48.308 -18 Td +[(-)-278 (shm_bools[1] = False \(p_red_finished\))]TJ +ET +q +1 0 0 -1 502.5 600.46875 cm +1 0.96 0.32 rg +1 w +q 1 0 0 1 661.25 240.9375 cm +0 0 m +297.5 0 l +297.5 58.125 l +0 58.125 l +h +B +Q +Q +BT +1275.167 326.394 Td +(do processing)Tj +ET +q +1 0 0 -1 502.5 600.46875 cm +1 0.64 0.64 rg +1 w +q 1 0 0 1 661.25 330.9375 cm +0 0 m +297.5 0 l +297.5 59.063 l +0 59.063 l +h +B +Q +Q +BT +1212.892 235.925 Td +(shm_bools[1] = True \(p_red_finished\))Tj +ET +q +1 0 0 -1 502.5 600.46875 cm +1 0.64 0.64 rg +1 w +q 1 0 0 1 661.25 420.4687 cm +0 0 m +297.5 0 l +297.5 59.063 l +0 59.063 l +h +B +Q +Q +BT +1210.917 146.394 Td +(last_processed_frame = framenumber)Tj +ET +q +1 0 0 -1 502.5 600.46875 cm +1 0.64 0.64 rg +1 w +q 1 0 0 1 661.25 510.4687 cm +0 0 m +297.5 0 l +297.5 59.063 l +0 59.063 l +h +B +Q +Q +BT +1212.667 56.394 Td +(shm_bools[4] = False \(p_red_started\))Tj +ET +q +1 0 0 -1 502.5 600.46875 cm +0.82 0.87 0.93 rg +1 w +q 1 0 0 1 661.25 -959.0625 cm +0 0 m +297.5 0 l +297.5 58.125 l +0 58.125 l +h +B +Q +Q +BT +1271.5 1526.394 Td +(processing_red)Tj +ET +q +1 0 0 -1 502.5 600.46875 cm +0.82 0.87 0.93 rg +1 w +q 1 0 0 1 1261.25 -959.0625 cm +0 0 m +297.5 0 l +297.5 58.125 l +0 58.125 l +h +B +Q +Q +BT +1864.833 1526.394 Td +(processing_green)Tj +ET +q +1 0 0 -1 502.5 600.46875 cm +0.82 0.87 0.93 rg +1 w +q 1 0 0 1 1651.25 -959.0625 cm +0 0 m +297.5 0 l +297.5 58.125 l +0 58.125 l +h +B +Q +Q +BT +2258.833 1526.394 Td +(processing_blue)Tj +ET +q +1 0 0 -1 502.5 600.46875 cm +0.82 0.87 0.93 rg +1 w +q 1 0 0 1 1261.25 
-869.0625 cm +0 0 m +297.5 0 l +297.5 1439.063 l +0 1439.063 l +h +B +Q +Q +BT +1847.167 745.925 Td +(same as processing_red)Tj +ET +q +1 0 0 -1 502.5 600.46875 cm +0.82 0.87 0.93 rg +1 w +q 1 0 0 1 1651.25 -869.0625 cm +0 0 m +297.5 0 l +297.5 1439.063 l +0 1439.063 l +h +B +Q +Q +BT +2237.167 745.925 Td +(same as processing_red)Tj +ET +q +1 0 0 -1 502.5 600.46875 cm +0.82 0.87 0.93 rg +1 w +q 1 0 0 1 -150 -1102.5 cm +0 0 m +-73.167 0 -132.5 -16.792 -132.5 -37.5 c +-132.5 -58.208 -73.167 -75 0 -75 c +73.166 -75 132.5 -58.208 132.5 -37.5 c +132.5 -16.792 73.166 0 0 0 c +h +B +Q +Q +BT +339.833 1736.394 Td +(Start)Tj +ET +q +1 0 0 -1 502.5 600.46875 cm +0.27 1 0 rg +1 w +/GS1 gs +q 1 0 0 1 -472.5 -645.531 cm +0 0 m +910.25 0 l +910.25 495.531 l +0 495.531 l +h +B +Q +Q +BT +/T1_0 17 Tf +0 1 -1 0 45.8667 899.5344 Tm +(Get frames from picamera)Tj +ET +q +1 0 0 -1 502.5 600.46875 cm +0.82 0.87 0.93 rg +1 w +/GS2 gs +q 1 0 0 1 225 -1229.5313 cm +0 0 m +390 0 l +390 119.063 l +0 119.063 l +h +B +Q +Q +Q +BT +/T1_1 27 Tf +815.976 1801.802 Td +(mp_vorbereitung)Tj +/C2_0 27 Tf +<0003>Tj +/T1_1 27 Tf +63.723 -40.5 Td +(V06_04)Tj +ET + +endstream endobj 29 0 obj [31 0 R] endobj 30 0 obj <>stream +H\j0 ~ +C]C`t rZ[I l琷&AO/- ua縰Cq ޕM6)-ps>xT9Kw-)}ㄔ@ӀAn']SEy= \BUo=:dK#H5PJ5 +wrq?ۘq=xr );(A#o +endstream endobj 31 0 obj <> endobj 32 0 obj <> endobj 33 0 obj <> endobj 34 0 obj <>stream +H` +endstream endobj 35 0 obj <>stream +H|U tW9CDׁܒyn؊k1vMu. [e~c{jAq`sYrdF><'A]{W(#~e=& &lJxj@Y2|xa4<'Y(n@{R@e*FZc k "U /Ou{؃ u&{B(C=4a`4VS:-`()RZc[7oa*|&>CFpYzaAnkȱޅU~x<3X#4zZ۠%cYǬ'RLG,Aҭ<jgA rv28SCb,&?*leMC1*')2 uhΌ7 5q>e{NS[q{G>CtLK~۴Zo?Id`=p]QaU`XqjGŰϋ01_̗gхhg09"p7EQ_$ZNwgY+)Rtda>)X~7ʤ)4~GQ0m@BX5i1cku +؋Z|/q +THɤ-|D)JjI#ryLPXV_iKlnf +.OϪ `1`.؄8ҿ׸ߏri"kIS=d+z^o-jm쁧czr9'a}-${sF܀s):1>Ah8H㩀*ޢi ml l=N Y- +"Q%<⼸ yt8(\9NNc|=\ngYy]~/9j!wU絩<7hG:^{=хG])VMe~c;gg/He"k.ڨ +jvВ-8dqU@x[S/=WAЅMk] c_9M;)2qHTԊd1Vk8gc%LFKQ/9Vf"$["Jw@c"~rP\ :B9; m#Ҭ$w#7wz*øgPK:w^5wOpFr')RԷV/++ [ +1+gaxt_%I\ՙE>^㮷2Bk5GO}"|_?m|&RGJzhʵevV;ۋ3 +g/[zslooƞ##Q5ۙxd)콵\χ6p#@BآDNy݁JѴ߹B}oI 3-a}@zQd;L#c8KRfJ)g1IQ6)cF ?_>{x{Rb]b޹SL(gEF n:U`ˀ~>v)) aƸL2$K;|~e|cp5?LaÙ3?h$#9!HwiOyӑceFzh0e.'Ms&ģ׏~3C527"$o2t3"0etw9*;oRZc G2ijL=ʹ51`Q_Wt &٦txucN3d,<(-YZdx*ìm7'e[1UeM,6i14xzj_#{b>TGa_W}pTW?{I)lA6*_!6 !%! iCL : 2LTI7D(‡ u?jlg4" +B[Ӑb[N~9s?9w?/^/g^":Ȍdk(R|*=OoZ4.L]XhΥDf%( $@Lif%Ê<PZG,oC`@񚵶F!.2OT)Gss3g2ER)||Tms[~V kEHf&/p[,H D[*CVO , GzKZ [ZK{}L>Md'&{NYtQT!|e._s[^=f'lv):4dvIP\h̕Ĩ?]z]T)BEK'fx\b߹f{-__8>λryu]$2V() "1!"} Dd˒zgAcf[FlWA<-2- H=Aue]ϋP,ɥ={%(2kY?W\{eh$jVU(D1&yD1yDA4ce :U2hiYӍ\B:PIѵ1Y9#e<Ѵ\w+-703535 $x:WA> 'Afh?\|ǓoW7߅fg+N`<:VA^ykmFŔ_>RX;Ku_i lRA|&}.ǁG@6Xec%}])G+/b.kZA}5sAT=(wBrZzq\ׁAMoh&1,i12qiW[kv! ~h@@Iڬ=IKYMY4 aUzKuyrz|؈;yܠBQ 3As{Rkh{ 30tn+=_XZKoDN 9@̆|r +=7Xm0 +ڧ3MsC;+}b{3_A@:0C?Ikl@[Os36s9HM8NKũ}[}qY}TD7Gr^kyOr+<{gEG:H7c{aP/R*fȽ:ģKɥ'd6byR$M6k$׺2Rܲdm@uohCx0BH{`'WlbF yuf5HEZ)QD}8wwc0ceQ@sp?<>rq.Kt,3|BjwpƟR1|hkK~&I4y,n]-ا݉G>s>}tߏ:{_@8c#J̼=zM9`~<6 9;5nesjٴ>:E[/Zqތ0^}C>1e2[zu%UnCzlO-U;MAUÔM1ecVGEC!^+Cck=QqSg,Ok +6ю~a'ߥ|8K\=yW}apZ +4Me{h'F0^jdeҘ#Z'WhM8x7`6X6|hc }x=4bih>U}Ԋdg>GT_ ?m埈E?yzȲXNEC:`#eIXꮫ;V`;d> `8Ρob8).`? 
+O?:67x{PblZp獷 װ6f?CF) }ʑ/C_~IumT7m)ӇY<1:9cm +LFloHgI9\gXu|C?{oWrΓu/ 2?J.M 1!]u7 FSG?!EqhDXf$*Hh?G,sJawOsv_+J|s=yqg'&cMCL>D/[j: e_r]*Y|Ii-W E*~RJPUh%o]/u h_~߇í=x\-if6]cIGmRg㫅l?Oa { OMh՟s!DyFiq8մI<ʨ%yYvƒate/ckx$aΑI#a%ϻKh~5[N[~hzh 7~6!cU==Ojq&|jNM!>߳ kߕwv֭%[G~xiFSVGeV{,YSY+{{E5F^l k$qO*i2SJ~=G)VdjA*%>][srorYi>]c]>)rŎ)|tG/*\|t?u./g-<)-Ôx ~iK:rW:hiݑWެt-u/irΛ'u5^sz\9\c-Z+GA-pW5@s\ maɿQ8jgzR$A-SÝeszߪ>5÷8(Cao +ȼz 1P!-Xrϛ=ʥKf"+y-gIm9z̶e"q~z6f1ldrN黂w\-ߗe5eF'}K0\Mﲶμ-}}!L!_El9BH Gp6} .3j9)8"2(Y'KS#?bGڬ;#~w_\|+=w8l7=x E!(b%gK#:cIIvf4ʾcN<s}3ak~n}q?wd(]Lr޻3߳KW9Ko{ 4*)[yG$ymu+EcYx +{+`(`>htE%7^=栢Z1/˰3ޑT{Y-׿x'Xs?Mm!<#?̥.f!^=&ALsFLW6vǒDs:_!}G&y9:vvB˻I}_>(a»].e +4O#Jp,FSC= sd8џu/e\eu%Ae)%!"HC ,v (@֘`!tmI)J(F0_累Lozpv5UE5٧I߃4gjC"uħۉqQlQ~_CA֓%QH.i'(_&n[rI+h5X7"]k{LY)˅ 6xZ7ƴ˶ bw PWI"?.u6c&TI^m`)- mIMO޵ SֆEK>+_ZKߒg͐1csއ|9㬒lZP5<߶X7߭n6kV&o kw*÷dκ1~N +no2Jqi :JZIZH&$#$ʼn)SX)7Hy fD[7/V?ilV8>uun{Ժȝ}tpan|bߊH^<4mX}+uR?~Nу?dⲑ2Z|*c9ZT4R A~ƺDE܆uϋ)At3O#>LmzW(  4qXl†&6{F&;HG|.KAڍN}՘t!m^%>Jޛ,4gjk7hwˎ:_<}'utVIk|J[޺[]+;=yu_7qNc1_4ktV;+>n +xNqfRvvY8 +\gqܴxh"7m:R~Bi׋gD') +U+!)0X=X|+!Y)N` pc@"ƏH/b{ w]Pl/|i3}VKCݷ/OHer_V w:cYʝRn-j> fh{3r>*uO@;BJ6R죒\盾j[13Xk$d&Xuun E)%הK٧d10 R!@| dB o__RC; LWXRnִߘKͺ :>+ Z6Xo9f/>ۤemg -;0XeO#N A{hqLɹ-[ cK-lN^TƷ =-wiފf b~(ckQh\7jd'Ghĥpl{ܢCL\X6~:o/)~Ú[si}v?dSfXg1>X:%yI)o 7e]һ5IMv2\N/J٬F5ԡJmܖN +|Ƈ跣lq7;gS^hJu+cy\A7EZ!JDbvwId͈) lF`"3]@M%}OZre"9bm?p[qggk cc 88Q*>N3gc+4jޓ_ An6N6b_)0..n~ lQ}Oz<4 wA-ZmC?%mCK},J?,o~jǹKe)SWvɶJelw7m]r󿳲ɟ2F|Tz- <Mxk;23=%t_k2w;dLxw g&<@z.T'ѾNX`k;ʜuoM'KWX:So76|GW=Mcj_sO2Ax7ӘtOrdߐXnI["?b=JkQQ& ƀĆ51K-FAυg㛇/3b,megp}@r{ͬyRR1F:-ڗљ(uඊqɿ6/&3? $1BB^7QHH06t +,(scI,G36 Ц%a, nЎ/Q>[IjJZmڨZUUEeդMTi X'j&m:[y{XS5ހ5B΀=8UvO'y/eM]SwQ)}zy*ؔ=ׄx_~oJC OABLvt9(o]39Kk^E!amCaRo:qi)ls_\e zµ{ҶqEĹ밙>| nOֺԽk)U8_p\Qg|Ϋ_)BKx>@}9j3ϳ^Bȿ.jSGcf\ϹC\w`6vؚ'b~qf15TU#9RWقFIqFAD, +xv=؏IK'Q%&qI\w"i&w>;2bWma1䧜qձ6bnCa=(!cەlmA*[gTscNC=IکhL6J(k1e-A$كRFLHUeF& [yO`ܩq6$|(==qč۩ZR𐂭a#}Dhm{/J9Ę1'G\ZrZdmXeXg*(9F|e7jr O$S*KTSY41e*숦lϠF/ucoCgE =3BPYg45IQƼUͮ_=O {G b}2nKbe2JX2PqfD1хlٗ6EwLdum\Vē$SҲ;E]-'O D$ي{ :ȑB?+b}X$IbPI?;N"iXb31i +sόFͨkEDұ#jfñxfawMt,o]Gׇ>^ehaMP8U$o/Ur+ Ђ[nq2Bjs\;n=GnNg})< ~*n0v3t WNLS)i,&թ[w'i uu>WsIQu(iM+6{|~#XnmQuoCEF@']P?ŏZ x8GA?ApDs9NkXdUϏW^EQb(?by o4; ޿|7_QQi0݋n{|ڡR6_Z~A"Q yX@rH|Ut5b}QQ{k/в 6m˔4A۪BBǕ2wHwt._R.#*Ym~F>ggj >_M%_[/;,l |VqiбF.>iVq6h,7AcL{)]FYʟYQ&kC^Jp{ζw4&xmvNF~i9[(G!!;!_x8!AFNl-hVfJe?.h\r.Kl>"d-$ҫ?]k s-Ξg?Őy\pHqrKqڢbiCqf':>okTLgY #͎?0%%UVt2f8ccڮ@S5p7I.qBln /# FzĶq\a|fEsWeQQY]6%Ha !4<`;ځ$#4$ـ +j"q J% +,@ȱ0<)@摒]TK} goh i@R9*<%1猪nyH + +К!1 T*I D D D%^@x <"<G#@DDDD!p@8D8     "   D$@$@$0A L&& $a0""Dק +) MDDDD66Ti"EH H H{Z fZV!v^UH!AH"$ BDH$FD D D Dm*)npV[S+ 2#?f$c< +Er =[<[}~$=t=ksd`@C؆ֵ|^p#0xl#jfzL(n-S*WP>AZJEYgfoͧ1(F^5;ɒ&]D؃ܙn>#/&ؔcß@uh %(Y]g;6& S`##Az~֫DOҏ&`M?:.ҽ|Eկ"9//,m?zvŏKLz 8o|&.b +fш!)pK)sI_U σ,N;)i`BOqx,,ǿfj7>+Ox<]?mŭMgqZ܍7 4oS +_4*,uqM\o[hUɊl kVpDT5;&jL'1&tbw~E>Rׇ!#d4N}anaCʂUT%ӸSГ ;uS~noc Cy/I& W:wh]v|_5Cݺ38;" 50x-gq+?/U/󪻥 hvRY +%(e{La'Ƣfdp?ɨ0\N\8&!E(.Oq{f.[7Mۣ==6[D(*lrWEq7lĦh !3BnabD'fA)籘yc[ pn>xwT^7rUu']]e5Y={WuOuwŬ?LײF꼛\sUR#Ӫ;rUʕrR+nLohbf$T6ֹ/g<~Hu:n򹉱/nW̓ ;XZrb^N,媭"kG_:{+$|d#QGZ-jB^t3uE>h顶zooNNlnc>uZ-X5Pi8>Wx@xӖ3ԘKqfx]U81 $PVǝ8*kZ|-YjZ2u4u(Ϭ]>XT+E,68%m2~erpA: څJPgηQz(ɪ# +endstream endobj xref +0 1 +0000000000 65535 f +3 1 +0000022091 00000 n +23 13 +0000022324 00000 n +0000022572 00000 n +0000022686 00000 n +0000022797 00000 n +0000022931 00000 n +0000026101 00000 n +0000038669 00000 n +0000038694 00000 n +0000038991 00000 n +0000057273 00000 n +0000057342 00000 n +0000057608 00000 n +0000057688 00000 n +trailer +<]/Prev 21412>> +startxref +71836 +%%EOF +3 0 obj 
<>/Font<>/ProcSet[/PDF/Text]/XObject<>>>/Type/Page>> endobj 23 0 obj <>>>/Subtype/Form/Type/XObject>>stream +0 TL +q +q +1 0 0 1 5 -5 cm +0 g +0 G +10 M +/GS0 gs +0 0 m +-5 5 l +0 10 l +h +B +Q +Q + +endstream endobj 24 0 obj <> endobj 27 0 obj <>stream + + + + + 2022-02-28T18:20:20+01:00 + 2022-05-04T09:25:22+02:00 + 2022-05-04T09:25:22+02:00 + jsPDF 2.3.1 + application/pdf + uuid:3224bb93-0008-4556-8c5d-c39429a4ac86 + uuid:3840d3de-7740-4876-a957-61f878c67dfc + + + + + + + + + + + + + + + + + + + + + + + + + +endstream endobj 36 0 obj <>stream +q +1 0 0 -1 0 1860 cm +0 0 2483 1860 re +W n +1 g +0 0 m +2483 0 l +2483 1860 l +0 1860 l +h +f +1 0 0 1 352.5 418.59375 cm +1.5 w +0 0 m +0 25.875 l +S +q +0 -1 1 0 0 30.16789 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +1 0 0 1 0 -102.5312 cm +0 0 m +0 38.406 l +S +q +0 -1 1 0 0 42.69909 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +1 0 0 1 0 -159.0313 cm +0 0 m +0 33.969 l +S +q +0 -1 1 0 0 38.26164 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +1 0 0 1 0 351.5625 cm +0 0 m +0 25.875 l +S +q +0 -1 1 0 0 30.16789 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +1 0 0 1 0 90 cm +0 0 m +0 25.406 l +S +q +0 -1 1 0 0 29.69914 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +1 0 0 1 148.75 90.9375 cm +0 0 m +56.5 0 l +S +q +-1 0 0 -1 60.79289 0 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +1 0 0 1 -148.75 59.5313 cm +0 0 m +0 25.406 l +S +q +0 -1 1 0 0 29.69909 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +1 0 0 1 0 269.5312 cm +0 0 m +0 50.938 l +0 57.604 -3.333 60.938 -10 60.938 c +-200 60.938 l +-206.667 60.938 -210 57.604 -210 50.938 c +-210 -319.062 l +-210 -325.729 -206.667 -329.062 -200 -329.062 c +-154.75 -329.062 l +S +q +-1 0 0 -1 -150.45711 -329.0625 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +1 0 0 1 148.75 -209.0625 cm +0 0 m +56.5 0 l +S +q +-1 0 0 -1 60.79289 0 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +1 0 0 1 -148.75 29.0625 cm +0 0 m +0 25.875 l +S +q +0 -1 1 0 0 30.16789 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +1 0 0 1 0 90 cm +0 0 m +0 25.875 l +S +q +0 -1 1 0 0 30.16789 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +1 0 0 1 148.75 60.9375 cm +0 0 m +56.5 0 l +S +q +-1 0 0 -1 60.79289 0 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +1 0 0 1 811.25 -720.9375 cm +0 0 m +0 115.875 l +S +q +0 -1 1 0 0 120.16789 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +1 0 0 1 600 0 cm +0 0 m +0 25.875 l +S +q +0 -1 1 0 0 30.16789 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +1 0 0 1 390 0 cm +0 0 m +0 25.875 l +S +q +0 -1 1 0 0 30.16789 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +1 0 0 1 -990 180 cm +0 0 m +0 25.875 l +S +q +0 -1 1 0 0 30.16789 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +1 0 0 1 -148.75 690.9375 cm +0 0 m +-51.25 0 l +-57.917 0 -61.25 -3.333 -61.25 -10 c +-61.25 -710 l +-61.25 -716.667 -57.917 -720 -51.25 -720 c +-6 -720 l +S +q +-1 0 0 -1 -1.70711 -720 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +1 0 0 1 0 660 cm +0 0 m +-81.25 0 l +-87.917 0 -91.25 -3.333 -91.25 -10 c +-91.25 -1370 l +-91.25 -1376.667 -87.917 -1380 -81.25 -1380 c +-6 -1380 l +S +q +-1 0 0 -1 -1.70711 -1380 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +1 0 0 1 -1.25 -510 cm +0 0 m +-80 0 l +-86.667 0 -90 -3.333 -90 -10 c +-90 -860 l +-90 -866.667 -86.667 -870 -80 -870 c +-4.75 -870 l +S +q +-1 0 0 -1 -0.45711 -870 cm +0 g +1 w +/GS0 gs +/Fm0 Do +Q +1 0 0 -1 -1162.5 570.46875 cm +BT +0 g +/T1_0 12 Tf +1110.833 578.469 Td +(no)Tj +ET +q +1 0 0 -1 1312.5 1260.46875 cm +0 0 m +0 24.469 l +S +Q +q +0 1 1 0 1312.5 1231.70711 cm +1 w +/GS0 gs +/T1_0 12 Tf +/Fm0 Do +Q +q +1 0 0 -1 1312.5 1110.93745 cm +0 0 m +0 24.719 l +S +Q +q +0 1 1 0 1312.5 1081.92586 cm +1 w +/GS0 gs +/T1_0 12 Tf +/Fm0 Do +Q +q +1 0 0 -1 1312.5 1020.46875 cm +0 0 m +0 24.937 l +S +Q +q 
+0 1 1 0 1312.5 991.23836 cm +1 w +/GS0 gs +/T1_0 12 Tf +/Fm0 Do +Q +BT +1279.417 1003.305 Td +(yes)Tj +ET +q +1 0 0 -1 1462.5 1050.34375 cm +0 0 m +50 0 l +56.667 0 60 3.333 60 10 c +60 409.875 l +60 416.542 56.667 419.875 50 419.875 c +-140 419.875 l +-146.667 419.875 -150 423.208 -150 429.875 c +-150 444 l +S +Q +q +0 1 1 0 1312.5 602.05086 cm +1 w +/GS0 gs +/T1_0 12 Tf +/Fm0 Do +Q +BT +1485.833 1034.344 Td +(no)Tj +ET +q +1 0 0 -1 1312.5 870.46875 cm +0 0 m +0 24.937 l +S +Q +q +0 1 1 0 1312.5 841.23836 cm +1 w +/GS0 gs +/T1_0 12 Tf +/Fm0 Do +Q +q +1 0 0 -1 1312.5 781.40625 cm +0 0 m +0 25.875 l +S +Q +q +0 1 1 0 1312.5 751.23836 cm +1 w +/GS0 gs +/T1_0 12 Tf +/Fm0 Do +Q +q +1 0 0 -1 1312.5 540.59375 cm +0 0 m +0 25.062 l +S +Q +q +0 1 1 0 1312.5 511.23836 cm +1 w +/GS0 gs +/T1_0 12 Tf +/Fm0 Do +Q +BT +1288.167 518.248 Td +(yes)Tj +ET +q +1 0 0 -1 1312.5 390.46875 cm +0 0 m +0 24.937 l +S +Q +q +0 1 1 0 1312.5 361.23836 cm +1 w +/GS0 gs +/T1_0 12 Tf +/Fm0 Do +Q +q +1 0 0 -1 1312.5 301.40625 cm +0 0 m +0 25.875 l +S +Q +q +0 1 1 0 1312.5 271.23836 cm +1 w +/GS0 gs +/T1_0 12 Tf +/Fm0 Do +Q +q +1 0 0 -1 1312.5 210.46875 cm +0 0 m +0 24.469 l +S +Q +q +0 1 1 0 1312.5 181.70711 cm +1 w +/GS0 gs +/T1_0 12 Tf +/Fm0 Do +Q +q +1 0 0 -1 1312.5 120.93755 cm +0 0 m +0 24.937 l +S +Q +q +0 1 1 0 1312.5 91.70711 cm +1 w +/GS0 gs +/T1_0 12 Tf +/Fm0 Do +Q +q +1 0 0 -1 1912.5 1501.40625 cm +0 0 m +0 25.875 l +S +Q +q +0 1 1 0 1912.5 1471.23836 cm +1 w +/GS0 gs +/T1_0 12 Tf +/Fm0 Do +Q +q +1 0 0 -1 2302.5 1501.40625 cm +0 0 m +0 25.875 l +S +Q +q +0 1 1 0 2302.5 1471.23836 cm +1 w +/GS0 gs +/T1_0 12 Tf +/Fm0 Do +Q +q +1 0 0 -1 203.75 1499.53125 cm +0.82 0.87 0.93 rg +1 w +0 0 m +297.5 0 l +297.5 58.125 l +0 58.125 l +h +B +Q +BT +339.5 1466.394 Td +(Main)Tj +ET +q +1 0 0 -1 203.75 1663.00005 cm +1 0.64 0.64 rg +1 w +0 0 m +297.5 0 l +297.5 119.063 l +0 119.063 l +h +B +Q +BT +282.167 1644.394 Td +(create shared memory for:)Tj +52.333 -18 Td +[(-)-278 (bools)]TJ +-43.333 -18 Td +[(-)-278 (framenumber \(uint64\))]TJ +30.333 -18 Td +[(-)-278 (red_frame)]TJ +-5 -18 Td +[(-)-278 (green frame)]TJ +4 -18 Td +[(-)-278 (blue frame)]TJ +ET +q +1 0 0 -1 203.75 1409.53125 cm +1 0.64 0.64 rg +1 w +0 0 m +297.5 0 l +297.5 58.125 l +0 58.125 l +h +B +Q +BT +300.167 1385.394 Td +(create 3 processes:)Tj +-18.333 -18 Td +(one for each color channel)Tj +ET +q +1 0 0 -1 203.75 1319.53125 cm +1 0.64 0.64 rg +1 w +0 0 m +297.5 0 l +297.5 58.125 l +0 58.125 l +h +B +Q +BT +263.5 1286.394 Td +(start activity of created processes)Tj +ET +q +1 0 0 -1 203.75 1229.99995 cm +0.82 0.87 0.93 rg +1 w +0 0 m +297.5 0 l +297.5 119.063 l +0 119.063 l +h +B +Q +BT +259.408 1184.394 Td +(get frames @25 fps from Picamera)Tj +38.758 -18 Td +(as raw NumPy array)Tj +-8 -18 Td +(without JPEG encoding)Tj +ET +q +1 0 0 -1 563.75 1229.99995 cm +0.82 0.87 0.93 rg +1 w +0 0 m +297.5 0 l +297.5 119.063 l +0 119.063 l +h +B +Q +BT +614.5 1184.394 Td +(always the newest frame is recieved:)Tj +6.333 -18 Td +(processing must be faster than fps)Tj +-1.667 -18 Td +(if every frame should be processed)Tj +ET +q +1 0 0 -1 203.75 1079.53125 cm +1 0.64 0.64 rg +1 w +0 0 m +297.5 0 l +297.5 58.125 l +0 58.125 l +h +B +Q +BT +292.417 1046.394 Td +(increase framenumber)Tj +ET +q +1 0 0 -1 563.75 1079.53125 cm +1 0.64 0.64 rg +1 w +0 0 m +297.5 0 l +297.5 58.125 l +0 58.125 l +h +B +Q +BT +625.892 1055.394 Td +(shm_bools[0] = True \(newframe\))Tj +-12.975 -18 Td +(shm_framenumber[0] = framenumber)Tj +ET +q +1 0 0 -1 203.75 989.53125 cm +0.82 0.87 0.93 rg +1 w +0 0 
m +297.5 0 l +297.5 58.125 l +0 58.125 l +h +B +Q +BT +256.833 965.394 Td +(split frame into: b, g, r - components)Tj +13.333 -18 Td +(and allocate to shared memory)Tj +ET +q +1 0 0 -1 203.75 899.53125 cm +1 0.64 0.64 rg +1 w +0 0 m +297.5 0 l +297.5 58.125 l +0 58.125 l +h +B +Q +BT +242.125 875.394 Td +(set trigger to "True" for each colorchannel)Tj +42.708 -18 Td +(for start of the processing)Tj +ET +q +1 0 0 -1 563.75 899.53125 cm +1 0.64 0.64 rg +1 w +0 0 m +297.5 0 l +297.5 58.125 l +0 58.125 l +h +B +Q +BT +647.892 866.394 Td +(shm_bools[7,8,9] = True)Tj +ET +q +1 0 0 -1 1163.75 1649.53125 cm +0.51 0.95 1 rg +1 w +0 0 m +1288.75 0 l +1288.75 58.125 l +0 58.125 l +h +B +Q +BT +1766.458 1616.394 Td +(Multiprocessing)Tj +ET +q +1 0 0 -1 1163.75 1469.53125 cm +1 0.64 0.64 rg +1 w +0 0 m +297.5 0 l +297.5 58.125 l +0 58.125 l +h +B +Q +BT +1263.083 1445.394 Td +(read framenumber)Tj +-7.583 -18 Td +(shm_framenumber[0])Tj +ET +q +1 0 0 -1 1163.75 1379.53125 cm +1 0.64 0.64 rg +1 w +0 0 m +297.5 0 l +297.5 119.063 l +0 119.063 l +h +B +Q +BT +1250.5 1351.925 Td +(conditions for first start:)Tj +46.833 -18 Td +[(-)-278 (i = 0)]TJ +-73.442 -18 Td +[(-)-278 (shm_bools[0]= True \(newframe\))]TJ +-27.225 -18 Td +[(-)-278 (shm_bools[1,2,3] = False \(p_rgb_finished\))]TJ +-3 -18 Td +[(-)-278 (shm_bools[7] = TRUE \(p_red_start_trigger\))]TJ +ET +q +1 0 0 -1 1163.75 1229.99995 cm +1 0.64 0.64 rg +1 w +0 0 m +297.5 0 l +297.5 119.063 l +0 119.063 l +h +B +Q +BT +1231.167 1184.394 Td +(conditions for start processing:)Tj +-23.833 -18 Td +[(-)-278 (framenumber > last_processed_frame)]TJ +-13.667 -18 Td +[(-)-278 (shm_bools[7] = TRUE \(p_red_start_trigger\))]TJ +ET +q +1 0 0 -1 1312.5 1080.21875 cm +0.82 0.87 0.93 rg +1 w +0 0 m +150 29.875 l +0 59.75 l +-150 29.875 l +h +B +Q +BT +1233.225 1046.269 Td +(conditions for first start = True)Tj +ET +q +1 0 0 -1 1132.5 989.53125 cm +1 0.64 0.64 rg +1 w +0 0 m +360 0 l +360 119.063 l +0 119.063 l +h +B +Q +BT +1302.833 952.925 Td +(set:)Tj +-91.275 -18 Td +[(-)-278 (shm_bools[4] = True \(p_red_started\))]TJ +-53.2 -18 Td +[(-)-278 (shm_bools[7] = False \(p_red_start_trigger\) | reset trigger)]TJ +48.308 -18 Td +[(-)-278 (shm_bools[1] = False \(p_red_finished\))]TJ +ET +q +1 0 0 -1 1163.75 839.53125 cm +0.82 0.87 0.93 rg +1 w +0 0 m +297.5 0 l +297.5 58.125 l +0 58.125 l +h +B +Q +BT +1275.167 806.394 Td +(do processing)Tj +ET +q +1 0 0 -1 1163.75 749.53125 cm +1 0.64 0.64 rg +1 w +0 0 m +297.5 0 l +297.5 89.063 l +0 89.063 l +h +B +Q +BT +1297.5 727.925 Td +(i += 1)Tj +3.667 -18 Td +(set: )Tj +-91.942 -18 Td +[(-)-278 (shm_bools[1] = True \(p_red_finished\))]TJ +-0.225 -18 Td +[(-)-278 (shm_bools[4] = False \(p_red_started\))]TJ +ET +q +1 0 0 -1 1312.5 600.34375 cm +0.82 0.87 0.93 rg +1 w +0 0 m +150 29.875 l +0 59.75 l +-150 29.875 l +h +B +Q +BT +1215.667 566.394 Td +(conditions for start processing = true)Tj +ET +q +1 0 0 -1 1132.5 509.53125 cm +1 0.64 0.64 rg +1 w +0 0 m +360 0 l +360 119.063 l +0 119.063 l +h +B +Q +BT +1302.833 472.925 Td +(set:)Tj +-91.275 -18 Td +[(-)-278 (shm_bools[4] = True \(p_red_started\))]TJ +-53.2 -18 Td +[(-)-278 (shm_bools[7] = False \(p_red_start_trigger\) | reset trigger)]TJ +48.308 -18 Td +[(-)-278 (shm_bools[1] = False \(p_red_finished\))]TJ +ET +q +1 0 0 -1 1163.75 359.53125 cm +1 0.96 0.32 rg +1 w +0 0 m +297.5 0 l +297.5 58.125 l +0 58.125 l +h +B +Q +BT +1275.167 326.394 Td +(do processing)Tj +ET +q +1 0 0 -1 1163.75 269.53125 cm +1 0.64 0.64 rg +1 w +0 0 m +297.5 0 l +297.5 59.063 l +0 59.063 l +h +B +Q +BT +1212.892 
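The trigger/finished handshake in the flowchart can be shown in isolation. A minimal, self-contained sketch of the same create/attach pattern; the names (demo_flags, worker) are illustrative and not from the repository:

import numpy as np
from multiprocessing import Process, shared_memory

def worker():
    shm = shared_memory.SharedMemory(name="demo_flags")  # attach to the existing block by name
    flags = np.ndarray((2,), dtype=np.bool8, buffer=shm.buf)
    while not flags[0]:  # spin until main sets the start trigger
        pass
    flags[0] = False  # reset the trigger, as processing_red does with shm_bools[7]
    flags[1] = True   # signal "finished", as with shm_bools[1]
    shm.close()

if __name__ == "__main__":
    shm = shared_memory.SharedMemory(name="demo_flags", create=True, size=2)  # size in bytes
    flags = np.ndarray((2,), dtype=np.bool8, buffer=shm.buf)
    flags[:] = False
    p = Process(target=worker)
    p.start()
    flags[0] = True  # start trigger, as main does with shm_bools[7,8,9]
    p.join()
    print(f"worker finished: {flags[1]}")  # True
    shm.close()
    shm.unlink()  # free the block once, in the creating process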
diff --git a/02_Kameraaufnahme/mp_Vorbereitung/PiCameraVideoPort_mp.py b/02_Kameraaufnahme/mp_Vorbereitung/PiCameraVideoPort_mp.py
new file mode 100644
index 0000000..359e40c
--- /dev/null
+++ b/02_Kameraaufnahme/mp_Vorbereitung/PiCameraVideoPort_mp.py
@@ -0,0 +1,594 @@
+# Creation Date: 14.01.2022
+# Author: Kenan Gömek
+# This script takes pictures with PiCamera's video port, the way it will be used with OpenCV, and saves them with OpenCV to get real-use-case pictures.
+# This script is designed for capturing a video with the frame number in it. Press "q" to quit.
+# You can take images with "i".
+
+# Update: 28.02.2022
+# This program is a development step for the final program.
+# It works with multiprocessing and with Picamera.
+# Create shared memories once before execution instead of twice: in main and in take_image_picamera_opencv.
+
+import cv2 as cv
+import numpy as np
+
+import picamera
+from picamera.array import PiRGBArray
+from fractions import Fraction
+
+import time
+from datetime import datetime
+import os
+import sys
+
+from multiprocessing import Process, shared_memory
+
+
+# Define camera settings
+
+# Divide the original resolution by a number to keep the original aspect ratio.
+# RESOLUTION = (3280, 2464) # Max photo resolution of CAM03 and CAM04. No image with the PiCamera video port at this resolution, probably GPU memory and CPU issues.
+# RESOLUTION = (1640, 1232) # 2nd best resolution for CAM03 and CAM04 with full FOV (2x2 binning) # Mode 4
+SENSOR_MODE = 4 # sensor mode corresponding to the resolution
+OUTPUT_RESOLUTION = (416, 320) # (width, height)
+image_width = OUTPUT_RESOLUTION[0]
+image_heigth = OUTPUT_RESOLUTION[1]
+    # (410,308) is upscaled to (416,320) by the ISP (warning in bash), but the image will still have (410,308) pixels.
+# OUTPUT_RESOLUTION = (820, 616) # (1640x1232)/2=(820,616)
+    # bash: frame size rounded up from 820x616 to 832x624
+number_of_colorchannels = 3 # r, g, b
+size_of_frame = int(image_heigth*image_width)
+frame_dimension = int(1)
+
+AWB_MODE = 'off' # Auto white balance mode
+AWB_GAINS = (Fraction(485, 256), Fraction(397, 256)) # White balance gains to have colours read correctly: (red, blue)
+ISO = 100 # ISO value
+EXPOSURE_MODE = 'off'
+FRAMERATE = 30 # frames per second. 40 fps is max for sensor mode 4
+
+SLEEP_TIME = 2 # Time for sleep mode for the camera in seconds. My default: 2 s
+
+# miscellaneous parameters
+max_value_of_uint64 = int((2**64) - 1) # @30 fps: conservatively calculated driving time: 1.95*1e10 years --> integer overflow not relevant.
+
+# settings for development
+show_opencv_window = False
+
+# create shared memories for the main process
+# shared memory for bools
+shm_bools_pre = np.array([False, False, False, False, False, False, False, False, False, False], dtype=np.bool8) # create numpy array with bools stored in it
+    # [0]: newframe [1]: p_red_finished [2]: p_green_finished [3]: p_blue_finished
+    # [4]: p_red_started [5]: p_green_started [6]: p_blue_started
+    # [7]: p_red_start_trigger [8]: p_green_start_trigger [9]: p_blue_start_trigger
+size_of_buffer = shm_bools_pre.nbytes
+print(f"size of buffer: {size_of_buffer}") # size of buffer: 10
+print(f"shm_bools dtype: {shm_bools_pre.dtype}") # dtype: bool
+shm_bools_create = shared_memory.SharedMemory(name="shm_bools", create=True, size=shm_bools_pre.nbytes) # create a new shared memory block
+shm_bools = np.ndarray(shm_bools_pre.shape, dtype=shm_bools_pre.dtype, buffer=shm_bools_create.buf) # create a NumPy array backed by shared memory
+shm_bools[:] = shm_bools_pre[:] # copy the original data into shared memory
+
+# print(shm_bool)
+# print(shm_bools.name)
+
+# shared memory for framenumber
+shm_framenumber_pre = np.array([0], dtype=np.uint64)
+size_of_buffer = shm_framenumber_pre.nbytes
+print(f"size of framenumber-buffer: {size_of_buffer}") # 8
+print(f"shm_framenumber dtype: {shm_framenumber_pre.dtype}") # uint64
+shm_framenumber_create = shared_memory.SharedMemory(name="shm_framenumber", create=True, size=shm_framenumber_pre.nbytes) # create a new shared memory block
+shm_framenumber = np.ndarray(shm_framenumber_pre.shape, dtype=shm_framenumber_pre.dtype, buffer=shm_framenumber_create.buf) # create a NumPy array backed by shared memory
+shm_framenumber[:] = shm_framenumber_pre[:] # copy the original data into shared memory
+# print(shm_framenumber) # [0]
+# print(shm_framenumber_create.name) # shm_framenumber
+
+# shared memory for red, green, blue frame
+int_black = 0 # integer for black colour / no colour
+shm_colorframes_pre = np.full((image_heigth, image_width), int_black, dtype=np.uint8)
+size_of_buffer = shm_colorframes_pre.nbytes
+print(f"size of colorframe-buffer: {size_of_buffer}") # 133 120
+print(f"shm_colorframes_pre dtype: {shm_colorframes_pre.dtype}") # uint8
+shm_redframe_create = shared_memory.SharedMemory(name="shm_redframe", create=True, size=shm_colorframes_pre.nbytes) # create a new shared memory block
+shm_greenframe_create = shared_memory.SharedMemory(name="shm_greenframe", create=True, size=shm_colorframes_pre.nbytes) # create a new shared memory block
+shm_blueframe_create = shared_memory.SharedMemory(name="shm_blueframe", create=True, size=shm_colorframes_pre.nbytes) # create a new shared memory block
+shm_redframe = np.ndarray(shm_colorframes_pre.shape, dtype=shm_colorframes_pre.dtype, buffer=shm_redframe_create.buf) # create a NumPy array backed by shared memory
+shm_greenframe = np.ndarray(shm_colorframes_pre.shape, dtype=shm_colorframes_pre.dtype, buffer=shm_greenframe_create.buf) # create a NumPy array backed by shared memory
+shm_blueframe = np.ndarray(shm_colorframes_pre.shape, dtype=shm_colorframes_pre.dtype, buffer=shm_blueframe_create.buf) # create a NumPy array backed by shared memory
+shm_redframe[:] = shm_colorframes_pre[:] # copy the original data into shared memory
+shm_greenframe[:] = shm_colorframes_pre[:] # copy the original data into shared memory
+shm_blueframe[:] = shm_colorframes_pre[:] # copy the original data into shared memory
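+# Note on the pattern above (added comment, not from the original source): each block is created
+# once here in main; the worker processes attach to it by its fixed name, e.g.
+# shared_memory.SharedMemory(name="shm_redframe"), and wrap it in an np.ndarray view with the
+# same shape and dtype. Writes through any view are visible to all processes; the creating
+# process should eventually close() and unlink() the blocks to release them.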
+
+
+# ----------------------------------------------------------------------------
+# Define functions
+def get_frames_from_picamera(shutter_speed):
+    # newframe = shm_bools[0] # do not use this! no updated values in "newframe"
+    # framenumber = shm_framenumber[0]
+
+    # Initialise camera
+    with picamera.PiCamera() as camera:
+        with PiRGBArray(camera) as output:
+            # Set camera settings
+            camera.sensor_mode = SENSOR_MODE # force camera into desired sensor mode
+            camera.resolution = OUTPUT_RESOLUTION # frame will be resized by the GPU to this resolution. No CPU usage!
+            camera.framerate = FRAMERATE
+
+            camera.awb_mode = AWB_MODE
+            camera.awb_gains = AWB_GAINS
+
+            camera.iso = ISO
+            camera.shutter_speed = shutter_speed
+            camera.exposure_mode = EXPOSURE_MODE
+
+            time.sleep(SLEEP_TIME) # camera warm-up time to apply settings
+
+            t_start = time.perf_counter() # save time for fps calculation
+
+            for frameidx, frame in enumerate(camera.capture_continuous(output, format='bgr', use_video_port=True)):
+                # General information:
+                # - always the newest frame is received: processing must be faster than fps if every frame should be processed
+
+                shm_bools[0] = True
+                framenumber = frameidx+1 # frameidx starts with 0, framenumber with 1
+
+                shm_framenumber[0] = framenumber
+                # print('')
+                # print(f"new frame: {framenumber}")
+
+                # image = frame.array # raw NumPy array without JPEG encoding
+
+                b, g, r = cv.split(frame.array) # split colour channels of the raw NumPy array without JPEG encoding
+                shm_redframe[:] = r
+                shm_greenframe[:] = g
+                shm_blueframe[:] = b
+                # for better performance one could assign the values to the shared memories directly in one line: shm_red, .., .. = cv.split(..)
+
+                shm_bools[7:10] = [True] # trigger the start of the processing for each colour channel
+                # print(shm_bools[7], shm_bools[8], shm_bools[9])
+
+                # display_image_with_text(image, shutter_speed, framenumber, camera_exposure_speed, trigger_record_OpenCV, out) # show the frame
+
+                output.truncate(0) # clear the stream for the next frame
+
+                if framenumber == 500: # 5 s @ 30 fps, only for performance measuring
+                    t_stop = time.perf_counter()
+                    print(f"calculated fps: {framenumber/(t_stop-t_start)}")
+                    break
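+# Note on the capture loop above (added comment): capture_continuous() writes every frame into
+# the same PiRGBArray, so output.truncate(0) at the end of each iteration rewinds the stream;
+# without it the buffer keeps growing between captures and subsequent captures fail.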
+
+
+def display_image_with_text(img, shutter_speed, framenumber, camera_exposure_speed, trigger_record_OpenCV, out):
+    img = img.copy() # make a copy of the image and do not modify the original
+
+    # please activate only one trigger at once
+    trigger_show_brightness = 0 # trigger for (not) calculating and showing the brightness of the image
+    if trigger_show_brightness == 1:
+        arithmetic_mean_of_brightness_per_pixel_relative = calc_arithmetic_mean_of_brightness_per_pixel(img)
+
+    trigger_show_max_brightness_values_of_colour_channels = 0 # trigger for (not) calculating and showing max values of the colour channels
+    if trigger_show_max_brightness_values_of_colour_channels == 1:
+        r_max, g_max, b_max = get_max_rgb_values(img)
+
+    font = cv.FONT_HERSHEY_SIMPLEX # font
+    fontScale = 1 # font scale
+    color = (255, 255, 255) # font colour in BGR
+    thickness = 1 # line thickness in px
+
+    # set text position
+    frame_width = int(img.shape[1])
+    frame_height = int(img.shape[0])
+    text_start_position_Y = int(round(frame_height*0.12)) # start position of text in pixels: 12 % of frame height
+    text_linespacing = 50 # line spacing between two strings in pixels
+    # text_start_position_X = int(frame_width/4) # start text from 1/4 of image width
+    text_start_position_X = int(0) # start text from the left edge of the image
+
+    # set position in (x,y)-coordinates from the top left corner: bottom-left corner of the text string in the image
+    pos_1 = (text_start_position_X, text_start_position_Y)
+    pos_2 = (text_start_position_X, text_start_position_Y+text_linespacing)
+    pos_3 = (text_start_position_X, text_start_position_Y+2*text_linespacing)
+    if trigger_show_brightness == 1 or trigger_show_max_brightness_values_of_colour_channels == 1:
+        pos_4 = (text_start_position_X, text_start_position_Y+3*text_linespacing)
+
+    # define text to display
+    text_line_1 = f"set ss: {shutter_speed} us"
+    text_line_3 = f"Frame: {framenumber}"
+    text_line_2 = f"ret exs: {camera_exposure_speed} us"
+    if trigger_show_brightness == 1:
+        if arithmetic_mean_of_brightness_per_pixel_relative >= 0.01:
+            text_line_4 = f"brightness: {round(arithmetic_mean_of_brightness_per_pixel_relative*100,2)} %"
+        elif arithmetic_mean_of_brightness_per_pixel_relative < 0.01:
+            text_line_4 = f"brightness: {round(arithmetic_mean_of_brightness_per_pixel_relative*10e3,2)} pm"
+    if trigger_show_max_brightness_values_of_colour_channels == 1:
+        text_line_4 = f"max: r:{r_max} g:{g_max} b:{b_max}"
+
+    # put the text into the image
+    image_text_1 = cv.putText(img, text_line_1, pos_1, font, fontScale, color, thickness, cv.LINE_AA)
+    image_text_2 = cv.putText(img, text_line_2, pos_2, font, fontScale, color, thickness, cv.LINE_AA)
+    image_text_3 = cv.putText(img, text_line_3, pos_3, font, fontScale, color, thickness, cv.LINE_AA)
+    if trigger_show_brightness == 1 or trigger_show_max_brightness_values_of_colour_channels == 1:
+        image_text_4 = cv.putText(img, text_line_4, pos_4, font, fontScale, color, thickness, cv.LINE_AA)
+
+    cv.imshow("Current Frame", img) # display the image
+    if trigger_record_OpenCV == 1:
+        out.write(img) # write frame to video
+
+
+def calc_arithmetic_mean_of_brightness_per_pixel(image):
+    """Calculate the mean brightness per pixel of the image."""
+    # Comment: so computationally intensive that no flashing is visible in the image (or only very rarely/weakly). Therefore max values are displayed.
+
+    b, g, r = cv.split(image) # OpenCV works with bgr. Image is also specified to be captured in bgr.
+    r = r.astype('uint16') # set dtype of one colour to uint16, because the sum 255+255+255 = 765 > 255
+    # the rest will then also be uint16
+    image_heigth = r.shape[0]
+    image_width = r.shape[1]
+
+    number_of_colour_channels = 3
+    arithmetic_mean_of_brightness_image = np.sum((r+g+b)/number_of_colour_channels)
+    arithmetic_mean_of_brightness_per_pixel = arithmetic_mean_of_brightness_image/(image_width*image_heigth)
+
+    max_possible_brightness = 255 # maximum possible brightness
+    arithmetic_mean_of_brightness_per_pixel_relative = arithmetic_mean_of_brightness_per_pixel/max_possible_brightness
+
+    return arithmetic_mean_of_brightness_per_pixel_relative
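+# Worked example for the uint16 cast above (added comment): in pure uint8 arithmetic,
+# 200 + 200 + 200 wraps modulo 256 to 88 instead of 600; promoting one operand to uint16
+# makes the whole expression r+g+b evaluate in uint16, so 600 is representable and the
+# mean stays correct.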
Ending script.") + quit() + + return path_saveFolder, folder_exists + +def do_processing(): + time.sleep(0.001) + #print("ohh i am doing high complex image analysis with computer vision") + +def do_processing_frame_r(frame): + print(f"max frame color red: {frame.max()}") +def do_processing_frame_g(frame): + print(f"max frame color green: {frame.max()}") +def do_processing_frame_b(frame): + print(f"max frame color blue: {frame.max()}") + + + +def processing_red(): + shm_bool_init = shared_memory.SharedMemory(name="shm_bools") # Attach to existing shared memory block + shm_bools = np.ndarray((10,), dtype=np.bool8, buffer= shm_bool_init.buf) # do not: newframe = np.array(...)[0] --> then one can not assign new value in main script + newframe = shm_bools[0] + p_red_finished = shm_bools[1] # not used, but for clarity + p_red_started = shm_bools[4] + + shm_framenumber_init = shared_memory.SharedMemory\ + (name="shm_framenumber") # Attach to existing shared memory block + shm_framenumber = np.ndarray((1,), dtype=np.uint64, \ + buffer= shm_framenumber_init.buf) + # framenumer = shm_framenumber[0] + + shm_redframe_init = shared_memory.SharedMemory\ + (name="shm_redframe") # Attach to existing shared memory block + shm_redframe = np.ndarray((image_heigth,image_width), dtype=np.uint8, \ + buffer= shm_redframe_init.buf) + + + i=0 + while True: + try: + framenumber = shm_framenumber[0] + if i==0: + last_processed_frame = framenumber + + conditions_for_first_start = (i==0) and\ + (shm_bools[0] == True) and \ + (shm_bools[1] == False) and (shm_bools[2] == False) and (shm_bools[3] == False) \ + and (shm_bools[7] == True) + + conditions_for_starting_processing = (framenumber>last_processed_frame) and (shm_bools[7] == True) + # newframe and all color-channel-processings have to be finished + + if conditions_for_first_start == True: + shm_bools[4] = True # process started + shm_bools[7] = False # reset trigger + + shm_bools[1] = False # set bool for p_red_finished to false + #t1 = time.perf_counter_ns() + do_processing() + i += 1 + #print(f"first processing red finished. frame: {framenumber}") + shm_bools[1] = True # set bool for p_red_finished to true + + shm_bools[4] = False # process ended + elif conditions_for_starting_processing == True: + shm_bools[4] = True # process started + shm_bools[7] = False # reset trigger + #print(f"red: framenumber: {framenumber}, last_processed_frame: {last_processed_frame}") + + shm_bools[1] = False # set bool for p_red_finished to false + #t1 = time.perf_counter_ns() + do_processing() + #print(f"max frame color red: {shm_redframe.max()}") + if show_opencv_window: + cv.imshow("red", shm_redframe) + cv.waitKey(1) + #print(f"processing red finished. 
frame: {framenumber}") + shm_bools[1] = True # set bool for p_red_finished to true + #t2 = time.perf_counter_ns() + #print(f"processing time for red channel: {round((t2-t1)*1e-6,2)} ms") + + last_processed_frame = framenumber + shm_bools[4] = False # process ended + # elif shm_bools[0] == False: + # pass + #print(f"no new red frame") + + # image processing finished + + + + except KeyboardInterrupt: + try: + shm_bool_init.close() + shm_framenumber_init.close() + shm_redframe_init.close() + + except FileNotFoundError: + # Memory already destroyed + pass + +def processing_green(): + shm_bool_init = shared_memory.SharedMemory(name="shm_bools") + shm_bools = np.ndarray((10,), dtype=np.bool8, buffer= shm_bool_init.buf) # do not: newframe = np.array(...)[0] --> then one can not assign new value in main script + newframe= shm_bools[0] + p_green_finished= shm_bools[2] # not used, but for clarity + p_green_started = shm_bools[5] + + shm_framenumber_init = shared_memory.SharedMemory\ + (name="shm_framenumber") # Attach to existing shared memory block + shm_framenumber = np.ndarray((1,), dtype=np.uint64, \ + buffer= shm_framenumber_init.buf) + # framenumer = shm_framenumber[0] + + shm_greenframe_init = shared_memory.SharedMemory\ + (name="shm_greenframe") # Attach to existing shared memory block + shm_greenframe = np.ndarray((image_heigth,image_width), dtype=np.uint8, \ + buffer= shm_greenframe_init.buf) + + i=0 + while True: + try: + framenumber = shm_framenumber[0] + if i==0: + last_processed_frame = framenumber + + conditions_for_first_start = (i==0) and\ + (shm_bools[0] == True) and \ + (shm_bools[1] == False) and (shm_bools[2] == False) and (shm_bools[3] == False) \ + and (shm_bools[8] == True) + + conditions_for_starting_processing = (framenumber>last_processed_frame) and (shm_bools[8] == True) + + if conditions_for_first_start == True: + shm_bools[5] = True # process started + shm_bools[8] = False # reset trigger + + shm_bools[2] = False # set bool for p_green_finished to false + i += 1 + do_processing() + #print(f"first processing green finished. frame: {framenumber}") + shm_bools[2] = True # set bool for p_green_finished to true + + shm_bools[5] = False # process ended + elif conditions_for_starting_processing == True: + shm_bools[5] = True # process started + shm_bools[8] = False # reset trigger + #print(f"green: framenumber: {framenumber}, last_processed_frame: {last_processed_frame}") + + shm_bools[2] = False # set bool for p_green_finished to false + do_processing() + if show_opencv_window: + cv.imshow("green", shm_greenframe) + cv.waitKey(1) + #print(f"max frame color green: {shm_greenframe.max()}") + #print(f"processing green finished. 
frame: {framenumber}") + shm_bools[2] = True # set bool for p_green_finished to true + + last_processed_frame = framenumber + shm_bools[5] = False # process ended + # elif shm_bools[0] == False: + # pass + # # print(f"no new green frame") + + # image processing finished + + + + except KeyboardInterrupt: + try: + shm_bool_init.close() + shm_framenumber_init.close() + shm_greenframe_init.close() + except FileNotFoundError: + # Memory already destroyed + pass + +def processing_blue(): + shm_bools_init = shared_memory.SharedMemory(name="shm_bools") + shm_bools = np.ndarray((10,), dtype=np.bool8, buffer= shm_bools_init.buf) # do not: newframe = np.array(...)[0] --> then one can not assign new value in main script + newframe= shm_bools[0] + p_red_finished= shm_bools[3] # not used, but for clarity + p_blue_started = shm_bools[6] + + shm_framenumber_init = shared_memory.SharedMemory\ + (name="shm_framenumber") # Attach to existing shared memory block + shm_framenumber = np.ndarray((1,), dtype=np.uint64, \ + buffer= shm_framenumber_init.buf) + # framenumer = shm_framenumber[0] + + shm_blueframe_init = shared_memory.SharedMemory\ + (name="shm_blueframe") # Attach to existing shared memory block + shm_blueframe = np.ndarray((image_heigth,image_width), dtype=np.uint8, \ + buffer= shm_blueframe_init.buf) + + i=0 + while True: + try: + framenumber = shm_framenumber[0] + if i==0: + last_processed_frame = framenumber + + + conditions_for_first_start = (i==0) and\ + (shm_bools[0] == True) and \ + (shm_bools[1] == False) and (shm_bools[2] == False) and (shm_bools[3] == False) \ + and (shm_bools[9] == True) + + conditions_for_starting_processing = (framenumber>last_processed_frame) and (shm_bools[9] == True) + # newframe and all color-channel-processings have to be finished + if conditions_for_first_start == True: + shm_bools[6] = True # process started + shm_bools[9] = False # reset trigger + + shm_bools[3] = False # set bool for p_blue_finished to false + i += 1 + do_processing() + #print(f"first processing blue finished. frame: {framenumber}") + shm_bools[3] = True # set bool for p_blue_finished to true + + shm_bools[6] = False # process ended + elif conditions_for_starting_processing == True: + shm_bools[6] = True # process started + shm_bools[9] = False # reset trigger + + #print(f"blue: framenumber: {framenumber}, last_processed_frame: {last_processed_frame}") + + shm_bools[3] = False # set bool for p_blue_finished to false + do_processing() + if show_opencv_window: + cv.imshow("blue", shm_blueframe) + cv.waitKey(1) + #print(f"max frame color blue: {shm_blueframe.max()}") + #print(f"processing blue finished. 
frame: {framenumber}") + shm_bools[3] = True # set bool for p_blue_finished to true + + last_processed_frame = framenumber + shm_bools[6] = False # process ended + # elif shm_bools[0] == False: + # pass + # #print(f"no new blue frame") + + # image processing finished + + + + except KeyboardInterrupt: + try: + shm_bools_init.close() + shm_framenumber_init.close() + shm_blueframe_init.close() + except FileNotFoundError: + # Memory already destroyed + pass + +# ---------------------------------------------------------------------------- +# main +def main(): + start = time.perf_counter() + + try: + # create processes + p_red = Process(target=processing_red) + p_green = Process(target=processing_green) + p_blue = Process(target=processing_blue) + processes = [p_red, p_green, p_blue] + + print(f"waiting 1 second to create processes") + time.sleep(1) # sind prozesse schon vorhanden + + # start acitivity of processes + for process in processes: + process.start() + + # start capturing + get_frames_from_picamera(shutter_speed=33333) #Can see something at normal light conditions + # get_frames_from_picamera(shutter_speed=1000) + + + print('*******************************') + # this below code is only executed if the loop in take_image_picamera_opencv is breaked + # In real use case there will be no end of this program + + # wait for all processes to finisch + + # main is blocked as long all processes are not finished + # The processes will never finish by design (better for performance, not to check for several triggers) + # for process in processes: + # process.join() + + for process in processes: + process.terminate() + + # print time measuring + end = time.perf_counter() + print(f'Script finished in {round(end-start, 2)} s') + + # close each SharedMemory instance and unlink to release the shared memory + shm_bools_create.close() + shm_bools_create.unlink() + shm_framenumber_create.close() + shm_framenumber_create.unlink() + shm_redframe_create.close() + shm_redframe_create.unlink() + shm_greenframe_create.close() + shm_greenframe_create.unlink() + shm_blueframe_create.close() + shm_blueframe_create.unlink() + + except KeyboardInterrupt: + # Normally this prgoram never gets keyboard interrupted! But here this case is nevertheless handled + # End Script + try: + # close each SharedMemory instance and unlink to release the shared memory + shm_bools.close() + shm_bools.unlink() + shm_framenumber_create.close() + shm_framenumber_create.unlink() + shm_redframe_create.close() + shm_redframe_create.unlink() + shm_greenframe_create.close() + shm_greenframe_create.unlink() + shm_blueframe_create.close() + shm_blueframe_create.unlink() + + except FileNotFoundError: + # Memory already destroyed + pass + + +if __name__ == "__main__": + main() diff --git a/02_Kameraaufnahme/mp_Vorbereitung/shared_memory.xlsx b/02_Kameraaufnahme/mp_Vorbereitung/shared_memory.xlsx new file mode 100644 index 0000000..29f8cac Binary files /dev/null and b/02_Kameraaufnahme/mp_Vorbereitung/shared_memory.xlsx differ diff --git a/03_Aufnahmeserie/01_How-To-Start-Scripts.txt b/03_Aufnahmeserie/01_How-To-Start-Scripts.txt new file mode 100644 index 0000000..1dadafc --- /dev/null +++ b/03_Aufnahmeserie/01_How-To-Start-Scripts.txt @@ -0,0 +1,3 @@ +1. start bash +2. workon cv-4.5.3.56 +3. 
python $SCRIPT_NAME$
\ No newline at end of file
diff --git a/03_Aufnahmeserie/01_Read Me.txt b/03_Aufnahmeserie/01_Read Me.txt
new file mode 100644
index 0000000..aed5c2a
--- /dev/null
+++ b/03_Aufnahmeserie/01_Read Me.txt
@@ -0,0 +1,5 @@
+#Author: Kenan Gömek
+#Date: 07.12.2021
+
+The .ino code must run on the Arduino, and the Python code on the Raspberry Pi.
+Photos can then be taken automatically for all colours from 0-255 and for the light patterns.
\ No newline at end of file
diff --git a/03_Aufnahmeserie/Ansteuerung_LEDs_uC.ino b/03_Aufnahmeserie/Ansteuerung_LEDs_uC.ino
new file mode 100644
index 0000000..2b8c82a
--- /dev/null
+++ b/03_Aufnahmeserie/Ansteuerung_LEDs_uC.ino
@@ -0,0 +1,148 @@
+/* This program contains all 20 possible colour combinations for a lane which has an orientation.
+ * The brightness of the color patterns is set here to 255.
+ * Furthermore colors can be transmitted via rrr-ggg-bbb values.
+ * It uses serial communication.
+ */
+#include <Adafruit_NeoPixel.h>
+#define LED_PIN 9
+#define NUM_LEDS 29
+
+String redString; String greenString; String blueString; String inputString; String color_pattern;
+
+Adafruit_NeoPixel strip = Adafruit_NeoPixel(NUM_LEDS, LED_PIN, NEO_GRB + NEO_KHZ800);
+
+/* Info:
+ * set the color of a pixel:
+ * strip.setPixelColor(n, red, green, blue);
+ * pixel index starts from n=0
+ *
+ * Multiple pixels can be set to the same color using the fill() function,
+ * which accepts one to three arguments. Typically it's called like this:
+ * strip.fill(color, first, count)
+ */
+
+int pause = 3000; // pause in ms
+
+
+// Definition of Colors
+int led_brightness = 255;
+uint32_t red = strip.Color(led_brightness,0,0);
+uint32_t green = strip.Color(0,led_brightness,0);
+uint32_t blue = strip.Color(0,0,led_brightness);
+uint32_t yellow = strip.Color(led_brightness,led_brightness,0);
+uint32_t magenta = strip.Color(led_brightness,0,led_brightness);
+uint32_t cyan = strip.Color(0,led_brightness,led_brightness);
+
+
+void setup() {
+  // put your setup code here, to run once:
+  Serial.begin(9600);
+
+  strip.begin();
+  strip.clear(); strip.show(); // turn off all LEDs
+
+
+}
+
+void loop() {
+  // put your main code here, to run repeatedly:
+  if (Serial.available()) {
+    inputString = Serial.readString();
+    color_pattern = inputString;
+    Serial.print("Received the following text: ");
+    Serial.println(inputString);
+
+    // split the string into R G B
+    redString=inputString.substring(0,2+1);
+    greenString=inputString.substring(4,6+1);
+    blueString=inputString.substring(8,10+1);
+  }
+
+  // define color patterns
+  if (color_pattern == "color_pattern_01") {
+    SetColorCombination(red, green, blue); // color pattern 01
+  }
+  else if (color_pattern == "color_pattern_02") {
+    SetColorCombination(red, green, yellow); // color pattern 02
+  }
+  else if (color_pattern == "color_pattern_03") {
+    SetColorCombination(red, green, magenta); // color pattern 03
+  }
+  else if (color_pattern == "color_pattern_04") {
+    SetColorCombination(red, green, cyan); // color pattern 04
+  }
+  else if (color_pattern == "color_pattern_05") {
+    SetColorCombination(red, blue, yellow); // color pattern 05
+  }
+  else if (color_pattern == "color_pattern_06") {
+    SetColorCombination(red, blue, magenta); // color pattern 06
+  }
+  else if (color_pattern == "color_pattern_07") {
+    SetColorCombination(red, blue, cyan); // color pattern 07
+  }
+  else if (color_pattern == "color_pattern_08") {
+    SetColorCombination(red, yellow, magenta); // color pattern 08
+  }
+  else if (color_pattern == "color_pattern_09") {
+    SetColorCombination(red, yellow, cyan); // 
color pattern 09 + } + else if (color_pattern == "color_pattern_10") { + SetColorCombination(red, magenta, cyan); // color pattern 10 + } + + else if (color_pattern == "color_pattern_11") { + SetColorCombination(green, blue, yellow); // color pattern 11 + } + else if (color_pattern == "color_pattern_12") { + SetColorCombination(green, blue, magenta); // color pattern 12 + } + else if (color_pattern == "color_pattern_13") { + SetColorCombination(green, blue, cyan); // color pattern 13 + } + else if (color_pattern == "color_pattern_14") { + SetColorCombination(green, yellow, magenta); // color pattern 14 + } + else if (color_pattern == "color_pattern_15") { + SetColorCombination(green, yellow, cyan); // color pattern 15 + } + else if (color_pattern == "color_pattern_16") { + SetColorCombination(green, magenta, cyan); // color pattern 16 + } + + else if (color_pattern == "color_pattern_17") { + SetColorCombination(blue, yellow, magenta); // color pattern 17 + } + else if (color_pattern == "color_pattern_18") { + SetColorCombination(blue, yellow, cyan); // color pattern 18 + } + else if (color_pattern == "color_pattern_19") { + SetColorCombination(blue, magenta, cyan); // color pattern 19 + } + else if (color_pattern == "color_pattern_20") { + SetColorCombination(yellow, magenta, cyan); // color pattern 20 + } + else{ + strip.clear(); + strip.fill(strip.Color(redString.toInt(),greenString.toInt(),blueString.toInt()),0,0); + strip.show(); + } + +} + +// Define Functions +void SetColorCombination(uint32_t color_1, uint32_t color_2, uint32_t color_3){ + // Set the color combination for the whole strip + + //determine whole numer of pixels, which can be displayed in a 3-row-constellation + int number_of_3rowpixels = NUM_LEDS - (NUM_LEDS % 3); // should be 27 if 29 leds are mounted. Modulo = 2: (29 % 3)=(9x3) +2 + int lastpixelindex = number_of_3rowpixels-1; // index starts at 0 + + for(int i=0; i<=lastpixelindex; i=i+3){ + strip.setPixelColor(i, color_1); + strip.setPixelColor(i+1, color_2); + strip.setPixelColor(i+2, color_3); + } + + strip.show(); + +} diff --git a/03_Aufnahmeserie/Ledstripe_Alignment.py b/03_Aufnahmeserie/Ledstripe_Alignment.py new file mode 100644 index 0000000..d92f901 --- /dev/null +++ b/03_Aufnahmeserie/Ledstripe_Alignment.py @@ -0,0 +1,235 @@ +# Creation Date: 14.01.2022 +# Author: Kenan Gömek +# This script takes pictures with Picameras VideoPort like it will be used to work with OpenCV and saves it with OpenCV to have the real use case pictures. +# This script is designed for aligning the LED-Stripe horizontally to the image sensor with LEDs off +# Use: +# 1. turn on the LEDs to e.g. 005-000-000 manually via bash or similar +# 2. execute this script +# 3. adjust shutter speed if desired with x,c,b,n,m (program starts with max shutter speed for selected frame rate) +# 4. align LED strip e.g. to the upper blue line and first led to the left edge of the image +# 5. take image of alignment with 'i' +# 6. end program with 'q' + +import cv2 as cv +import picamera +from picamera.array import PiRGBArray +from fractions import Fraction + +import time +from datetime import datetime +import os + +import numpy as np + + + +# Define camera settings + +# divide origin resoluton by a number, to have the origin aspect ratio + +# RESOLUTION = (3280, 2464) # Max Photo-Resolution CAM03 and CAM04 # no image with PiCamera Videoport at this Resolution.. Probably GPU Memory and CPU issues. 
+# RESOLUTION = (1640,1232) # 2nd best Resolution for CAM03 and CAM04 with FUll FOV (2x2 binning) # Mode 4 +SENSOR_MODE = 4 # corresponding sensor mode to resolution +OUTPUT_RESOLUTION = (416, 320) # (1640x1232)/4=(410,308) + # (410,308) is being upscaled to (416,320) from ISP (warning in bash), but image will have still (410,308) pixels. +# OUTPUT_RESOLUTION = (820, 616) # (1640x1232)/2=(820,616) + # bash: frame size rounded up from 820x616 to 832x624 + + +AWB_MODE = 'off' # Auto white balance mode +AWB_GAINS = (Fraction(485, 256), Fraction(397, 256)) # White Balance Gains to have colours read correctly: (red, blue) +ISO = 100 # ISO value +EXPOSURE_MODE = 'off' +FRAMERATE = 10 # frames per second. 40 fps is max for sensor mode 4 + +SLEEP_TIME = 2 # Time for sleep-mode for the camera in seconds. My default: 2 s + +# Define Funcions +def take_image_picamera_opencv(shutter_speed): + + # Initialise Camera + with picamera.PiCamera() as camera: + with PiRGBArray(camera) as output: + # Set camera settings + camera.sensor_mode = SENSOR_MODE # force camera into desired sensor mode + camera.resolution = OUTPUT_RESOLUTION # frame will be resized from GPU to this resolution. No CPU usage! + camera.framerate = FRAMERATE + + camera.awb_mode = AWB_MODE + camera.awb_gains = AWB_GAINS + + camera.iso = ISO + camera.shutter_speed = shutter_speed + camera.exposure_mode = EXPOSURE_MODE + + time.sleep(SLEEP_TIME) # Camera warm-up time to apply settings + + # camera.start_preview() # show camera preview through PiCamera interface + # camera.annotate_frame_num=True # Controls whether the current frame number is drawn as an annotation. + + for frameidx, frame in enumerate(camera.capture_continuous(output, format='bgr', use_video_port=True)): + framenumber = frameidx+1 # frameidx starts with 0, framenumber with 1 + image = frame.array # raw NumPy array without JPEG encoding + + camera_exposure_speed = camera.exposure_speed # Retrieves the current shutter speed of the camera. + + # cv.imshow("Current Frame", image) # display the image without text + img = alignment_procedure(image, shutter_speed, framenumber, camera_exposure_speed) # show the frame + + framenumber_to_save = 7000 # frame 15 (no particular reason for frame 15) + trigger_save_frame = 'no' # trigger for saving the frame + if framenumber == framenumber_to_save and trigger_save_frame=='yes': # save desired frame + now = datetime.now(); d1 = now.strftime("%Y-%m-%d %H-%M-%S.%f")[:-3] + print(f"Take picture! framenumber: {framenumber} shutter speed: {shutter_speed} µs") + cv.imwrite(f"{path_saveFolder}/ss{shutter_speed}_Date {d1}.png", image) + break # break from the loop, because we took the image we wanted + + output.truncate(0) # clear the stream for next frame + + # Only uncomment following code if you display the image. No errors if not commented, but higher fps if commented. + # if q is pressed, break from loop. 
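+                # Summary of the keyboard controls implemented in the branches below:
+                #   q: quit | i: save the annotated frame | o: shutter speed 0 (auto)
+                #   b/n: increase shutter speed by 50/500 us | c/x: decrease by 50/500 us
+                #   m: maximum shutter speed for the configured framerate (1/FRAMERATE)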
+ pressed_key = cv.waitKey(1) & 0xff + if pressed_key == ord('q'): + break + elif pressed_key == ord('i'): # Take image from manipulated image if i is pressed + now = datetime.now(); d1 = now.strftime("%Y-%m-%d %H-%M-%S.%f")[:-3] + cv.imwrite(f"{path_saveFolder}/Date {d1}.png", img) + print('took image!') + elif pressed_key == ord('b'): # increase shutterspeed by 50 + shutter_speed = round(shutter_speed+50) + camera.shutter_speed = shutter_speed + elif pressed_key == ord('n'): # increase shutterspeed by 500 + shutter_speed = round(shutter_speed+500) + elif pressed_key == ord('m'): # max shutterspeed + shutter_speed = round(1/FRAMERATE*1e6) + camera.shutter_speed = shutter_speed + elif pressed_key == ord('x'): # decrease shutterspeed by 500 + shutter_speed = round(shutter_speed-500) + camera.shutter_speed = shutter_speed + elif pressed_key == ord('c'): # decrease shutterspeed by 50 + shutter_speed = round(shutter_speed-50) + camera.shutter_speed = shutter_speed + elif pressed_key == ord('o'): # set shutter speed to 0 + shutter_speed = 0 + camera.shutter_speed = shutter_speed + + +def display_image_with_text(img, shutter_speed, framenumber, camera_exposure_speed): + img = img.copy() # make copy of image and do not modify the original image + + font = cv.FONT_HERSHEY_SIMPLEX # font + fontScale = 1 # fontScale + color = (255, 255, 255) # Font colour in BGR + thickness = 1 # Line thickness in px + + # set text position + frame_width = int(img.shape[1]) + frame_height = int(img.shape[0]) + text_start_position_Y = int(round(frame_height*0.12)) # start position of text in pixels 12 % of frame height + text_linespacing = 50 # line spacing between two strings in pixels + # text_start_position_X = int(frame_width/4) # start text from 1/4 of image width + text_start_position_X = int(0) # start text from left edge of image + + # set position in (x,y)-coordinated from top left corner. Bottom-left corner of the text string in the image. 
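+    # (cv.putText draws relative to the given origin: y grows downwards from the
+    # top-left image corner, and the origin is the bottom-left corner of the text.
+    # That is why the first line is placed at ~12 % of the frame height instead of
+    # at y=0, where it would be cut off at the top.)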
+ pos_1 = (text_start_position_X, text_start_position_Y) # start text from 1/4 of image width + pos_2 = (text_start_position_X, text_start_position_Y+text_linespacing) # start text from 1/4 of image width + pos_3 = (text_start_position_X, text_start_position_Y+2*text_linespacing) # start text from 1/4 of image width + + # define text to display + text_line_1 = f"set shttr-spd: {shutter_speed} us" + text_line_3 = f"Frame: {framenumber}" + text_line_2 = f"ret exp-spd: {camera_exposure_speed} us" + + # put the text into the image + image_text_1 = cv.putText(img, text_line_1, pos_1, font, + fontScale, color, thickness, cv.LINE_AA) + image_text_2 = cv.putText(img, text_line_2, pos_2, font, + fontScale, color, thickness, cv.LINE_AA) + image_text_3 = cv.putText(img, text_line_3, pos_3, font, + fontScale, color, thickness, cv.LINE_AA) + + cv.imshow("Current Frame", img) # display the image + +def alignment_procedure(img, shutter_speed, framenumber, camera_exposure_speed): + img = img.copy() # make copy of image and do not modify the original image + + font = cv.FONT_HERSHEY_SIMPLEX # font + fontScale = 1 # fontScale + color = (255, 255, 255) # Font colour in BGR + thickness = 1 # Line thickness in px + + # set text position + frame_width = int(img.shape[1]) + frame_height = int(img.shape[0]) + text_start_position_Y = int(round(frame_height*0.12)) # start position of text in pixels 12 % of frame height + text_linespacing = 50 # line spacing between two strings in pixels + # text_start_position_X = int(frame_width/4) # start text from 1/4 of image width + text_start_position_X = int(0) # start text from left edge of image + + # set position in (x,y)-coordinated from top left corner. Bottom-left corner of the text string in the image. + pos_1 = (text_start_position_X, text_start_position_Y) # start text from 1/4 of image width + pos_2 = (text_start_position_X, text_start_position_Y+text_linespacing) # start text from 1/4 of image width + pos_3 = (text_start_position_X, text_start_position_Y+2*text_linespacing) # start text from 1/4 of image width + + # define text to display + text_line_1 = f"set shttr-spd: {shutter_speed} us" + text_line_3 = f"Frame: {framenumber}" + text_line_2 = f"ret exp-spd: {camera_exposure_speed} us" + + # put the text into the image + image_text_1 = cv.putText(img, text_line_1, pos_1, font, + fontScale, color, thickness, cv.LINE_AA) + image_text_2 = cv.putText(img, text_line_2, pos_2, font, + fontScale, color, thickness, cv.LINE_AA) + image_text_3 = cv.putText(img, text_line_3, pos_3, font, + fontScale, color, thickness, cv.LINE_AA) + + # draw lines into image + alignment_line_thickness = 1 # thickness of the alignment line in pixels + alignment_line_offset = 21 # offset of the alignment line from the center + frame_center = round(frame_height/2) # center of the frame + offset_center = frame_center + 70 # offset of the center for e.g. 
have the alignment lines more in the bottom part of the image + alignment_row_start = offset_center-alignment_line_offset + alignment_row_end = offset_center+alignment_line_offset + + img[offset_center, :, :] = [255,255,255] # bgr format + img[alignment_row_start-alignment_line_thickness:alignment_row_start, :, :] = [255,0,0] # bgr format + img[alignment_row_end:alignment_row_end+alignment_line_thickness, :, :] = [0,0,255] # bgr format + + cv.imshow("Current Frame", img) # display the image + return img + + +# Start Script + +# Create folder for saving the captured pictures +now = datetime.now(); d1 = now.strftime("%Y-%m-%d %H-%M") +path_cwd = os.getcwd() + +path_saveFolder = path_cwd+r"/Ledstripe_alignment_"+d1 +try: + os.mkdir(path_saveFolder) +except OSError: + print("Error! Ending script. Try it again in 1 minute") + quit() + + + + +# start capture series for different shutter speeds +print('start caputure series...') + +# take_image_picamera_opencv(shutter_speed=2000000) #Can see something at normal light conditions +take_image_picamera_opencv(shutter_speed=round(1/FRAMERATE*1e6)) # max shutter-speed depending on fps: 1/2 fps = 500 000 µs + +# End Script + +cv.destroyAllWindows() + +print('Script finished') + + + + + + diff --git a/03_Aufnahmeserie/Takeimage_Shutterspeed_PiCameraVideoPort_V01-02.py b/03_Aufnahmeserie/Takeimage_Shutterspeed_PiCameraVideoPort_V01-02.py new file mode 100644 index 0000000..ea00265 --- /dev/null +++ b/03_Aufnahmeserie/Takeimage_Shutterspeed_PiCameraVideoPort_V01-02.py @@ -0,0 +1,441 @@ +# Creation Date: 10.01.2022 +# Author: Kenan Gömek +# This script takes pictures with Picameras VideoPort like it will be used to work with OpenCV and saves it with OpenCV to have the real use case pictures. +# Update: This script is designed to take image series with different camera parameters +# Update-Comment: +# To-DO: + +import serial +import cv2 as cv +import picamera +from picamera.array import PiRGBArray +from fractions import Fraction + +import time +from datetime import datetime +import os + +import numpy as np + +# Comment for Measurement-Log: +CAM_NR = "04" # CAM:01,02,03,04 + +COMMENT = "" + +# Set Parameters for series with variable shutterspeeds +# Uncomment/Comment needed code parts for shutter speed lists: +#START_SHUTTER_SPEED = 50 # microseconds. 0=auto +#END_SHUTTER_SPEED = 100 # microseconds +#STEP_SHUTTER_SPEED = 25 # microseconds +# shutter_speeds_add = [47, 66, 85, 104] # microseconds. shutter speeds to additionally add +shutter_speeds_add = [50] # microseconds. shutter speeds to additionally add + # from investigations: ss < 30 are all darker, but the darkness is the same. 
e.g: ss=5,10,15,20,25 are all the same brightness=Shutter_speed + # with ss <5 µs (because < 20 µs) one can request the minimum posisble shutter speed + # it was found, that ss is adjustable in 20 µs-steps from 30 µs ongoing: [10 µs, 30 µs, 50 µs] in this range + # retrieved ss: 9, 28, 47, 66, 85, 104 µs + +shutter_speeds = list() +#for ss in range(START_SHUTTER_SPEED, END_SHUTTER_SPEED+STEP_SHUTTER_SPEED, STEP_SHUTTER_SPEED): +# shutter_speeds.append(ss) +#shutter_speeds = shutter_speeds + shutter_speeds_add # concatenate lists +shutter_speeds = shutter_speeds_add +shutter_speeds.sort() # sort list + +# camera_resolutions = [(192, 144), (416,320), (640,480), (960,720), (1280,960), (1648,1232)] +camera_resolutions = [(192, 144)] +iso_values = [320] + +# Define camera settings +if CAM_NR == "03": + SENSOR_MODE = 4 # corresponding sensor mode to resolution + OUTPUT_RESOLUTION = (416, 320) # (1640x1232)/4=(410,308) + # Camera informations for log + CAM_DESCRIPTION = "Raspberry Pi Camera Module V2" + CAM_EAN = "506021437024020" +elif CAM_NR == "04": + SENSOR_MODE = 4 # corresponding sensor mode to resolution + # Camera informations for log + CAM_DESCRIPTION = "Raspberry Pi NoIR Camera Module V2" + CAM_EAN = "0640522710898" +elif CAM_NR == ("01"): + SENSOR_MODE = 4 # corresponding sensor mode to resolution --> gibt es sowas für camera 1? Ja gleicher Sensor wie Raspberry Pi V1 sensor! + # Camera informations for log + CAM_DESCRIPTION = "RPI-SPYCAM" + CAM_EAN = "4251266701743 " +elif CAM_NR == ("02"): + SENSOR_MODE = 4 # corresponding sensor mode to resolution --> gibt es sowas für camera 2? --> Ja gleicher Sensor wie Raspberry Pi V1 sensor! + # Camera informations for log + CAM_DESCRIPTION = "RB-Camera_JT" + CAM_EAN = "4250236815909" + + +AWB_MODE = 'off' # Auto white balance mode +# AWB_GAINS = (Fraction(485, 256), Fraction(397, 256)) # White Balance Gains to have colours read correctly: (red, blue) +AWB_GAINS = (1.395, 1.15) # White Balance Gains to have colours read correctly: (red, blue). Int, floar or fraction are valid. +BRIGHTNESS = 25 # sets the brightness setting of the camera. default is 50. [0-100] + #the brighter, the brighter the LEDs and the higher the RGB values and vice versa! +CONTRAST = 100 # sets the contrast setting of the camera. The default value is 0. [-100 ... 100] + +ISO = 320 # ISO value +EXPOSURE_MODE = 'off' +FRAMERATE = 25 # frames per second. 40 fps is max for sensor mode 4 + +SLEEP_TIME = 2 # Time for sleep-mode for the camera in seconds. My default: 2 s + + +# Dictionary for color patterns +# r: red, g: green, b: blue, y: yellow, m: magenta, c: cyan +dict_color_pattern = { + "01": "rgb", + "02": "rgy", + "03": "rgm", + "04": "rgc", + "05": "rby", + "06": "rbm", + "07": "rbc", + "08": "rym", + "09": "ryc", + "10": "rmc", + "11": "gby", + "12": "gbm", + "13": "gbc", + "14": "gym", + "15": "gyc", + "16": "gmc", + "17": "bym", + "18": "byc", + "19": "bmc", + "20": "ymc", +} + +# Define Funcions +def take_image_picamera_opencv(shutter_speed, iso, resolution, led_rgb_value): + colour_string = led_rgb_value + color_substring_list = [str(x).zfill(2) for x in range(1,21)] # string list to check for color pattern + + # Initialise Camera + with picamera.PiCamera() as camera: + with PiRGBArray(camera) as output: + # Set camera settings + camera.sensor_mode = SENSOR_MODE # force camera into desired sensor mode + camera.resolution = resolution # frame will be resized from GPU to this resolution. No CPU usage! 
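+            # Note: sensor mode 4 always reads out the full 1640x1232 field of view
+            # (2x2 binning); 'resolution' only selects the size the GPU rescales to.
+            # A sanity check could assert that the 4:3 sensor aspect ratio is kept
+            # (illustrative sketch, not part of the original script):
+            # assert abs(resolution[0]/resolution[1] - 1640/1232) < 0.1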
+ camera.framerate = FRAMERATE + + camera.awb_mode = AWB_MODE + camera.awb_gains = AWB_GAINS + + camera.iso = iso # Iso must be set prior fixing the gains (prior setting exposure_mode to off) + camera.shutter_speed = shutter_speed + # it was found that, you have to set the right shutter speed at the first initalisation of the current runtime of the program. + # The gains (analog, digital) will adjust to this set up. + # After the gains are fixed, they will never change! even if you change the shutter speed during the runtime. + # To get consistent brightness values, set the right shutter speed at initalisation once. + + time.sleep(SLEEP_TIME) # wait for iso gains and digital_gain and analog_gain to settle before fixing the gains with exposure_mode = off + # digital_gain: The value represents the digital gain the camera applies after conversion of the sensor’s analog output. + # analog_gain: The value represents the analog gain of the sensor prior to digital conversion. + # digital and analog gain can not be set from the user. + + camera.exposure_mode = EXPOSURE_MODE + # exposure_mode value 'off' overrides the ISO setting. + # For example, 'off' fixes analog_gain and digital_gain entirely, + # preventing this property from adjusting them when set. + time.sleep(1) + camera.brightness = BRIGHTNESS + camera.contrast = CONTRAST + # camera.start_preview() + time.sleep(SLEEP_TIME) # Camera warm-up time to apply settings + exposure_speed = camera.exposure_speed # settings have to be applied before retrieving the right exposure_speed + + # cv.namedWindow("Current Frame", cv.WINDOW_NORMAL) + for frameidx, frame in enumerate(camera.capture_continuous(output, format='bgr', use_video_port=True)): + framenumber = frameidx+1 # frameidx starts with 0, framenumber with 1 + image = frame.array # raw NumPy array without JPEG encoding + + #display_image_with_text(image, shutter_speed, framenumber) # show the frame + + framenumber_to_save = 15 # save frame 15 (no particular reason for frame 15) + if framenumber == framenumber_to_save: # save desired frame + now = datetime.now(); d1 = now.strftime("%Y-%m-%d %H-%M-%S") + print(f"Take picture! iso: {iso} shutter speed: {shutter_speed} µs") + if "-" in colour_string: + cv.imwrite(f"{path_saveFolder}/RGB {colour_string}_ss{shutter_speed}_es{exposure_speed}_iso{iso}_b{BRIGHTNESS}_c{CONTRAST}_res{resolution[0]}x{resolution[1]}_Date {d1}.png", image) + elif any(substring in colour_string for substring in color_substring_list): + cv.imwrite(f"{path_saveFolder}/cp {colour_string}-{dict_color_pattern[colour_string]}_ss{shutter_speed}_es{exposure_speed}_iso{iso}_b{BRIGHTNESS}_c{CONTRAST}_res{resolution[0]}x{resolution[1]}_Date {d1}.png", image) + break # break from the loop, because we took the image we wanted + + output.truncate(0) # clear the stream for next frame + + # Only uncomment following code if you display the image. No errors if not commented, but higher fps if commented. + # if q is pressed, break from loop. + # if cv.waitKey(1) & 0xff == ord('q'): + # break + + +def take_image_series_onecolourchannel_per_led(led_rgb_value): + """Take image series in one specified colour.""" + s.write(str.encode(led_rgb_value)); response = s.readline(); print(response) + for i, resolution in enumerate(camera_resolutions): + print(f'resolution {i+1}/{len(camera_resolutions)}: {resolution}') + for ss in shutter_speeds: + for iso in iso_values: + take_image_picamera_opencv(ss, iso, resolution, led_rgb_value) + + +def take_image_series_rgb(brightness): + """Take image series. 
One colour per series.""" + t1 = time.perf_counter() + + print(f'switchting to red') + led_rgb_value = f"{brightness}-000-000" + s.write(str.encode(led_rgb_value)); response = s.readline(); print(response) + for i, resolution in enumerate(camera_resolutions): + print(f'resolution {i+1}/{len(camera_resolutions)}: {resolution}') + for ss in shutter_speeds: + for iso in iso_values: + take_image_picamera_opencv(ss, iso, resolution, led_rgb_value) + + print('switchting to green') + led_rgb_value = f"000-{brightness}-000" + s.write(str.encode(led_rgb_value)); response = s.readline(); print(response) + for i, resolution in enumerate(camera_resolutions): + print(f'resolution {i+1}/{len(camera_resolutions)}: {resolution}') + for ss in shutter_speeds: + for iso in iso_values: + take_image_picamera_opencv(ss, iso, resolution, led_rgb_value) + + print('switchting to blue') + led_rgb_value = f"000-000-{brightness}" + s.write(str.encode(led_rgb_value)); response = s.readline(); print(response) + for i, resolution in enumerate(camera_resolutions): + print(f'resolution {i+1}/{len(camera_resolutions)}: {resolution}') + for ss in shutter_speeds: + for iso in iso_values: + take_image_picamera_opencv(ss, iso, resolution, led_rgb_value) + + t2 = time.perf_counter() + elapsed_time = round((t2-t1)/60,2) + print(f'series_rgb finished in {elapsed_time} min') + +def take_image_series_ymc(brightness): + """Take image series. One colour per series.""" + t1 = time.perf_counter() + + print(f'switchting to yellow') + led_rgb_value = f"{brightness}-{brightness}-000" + s.write(str.encode(led_rgb_value)); response = s.readline(); print(response) + for i, resolution in enumerate(camera_resolutions): + print(f'resolution {i+1}/{len(camera_resolutions)}: {resolution}') + for ss in shutter_speeds: + for iso in iso_values: + take_image_picamera_opencv(ss, iso, resolution, led_rgb_value) + + print('switchting to magenta') + led_rgb_value = f"{brightness}-000-{brightness}" + s.write(str.encode(led_rgb_value)); response = s.readline(); print(response) + for i, resolution in enumerate(camera_resolutions): + print(f'resolution {i+1}/{len(camera_resolutions)}: {resolution}') + for ss in shutter_speeds: + for iso in iso_values: + take_image_picamera_opencv(ss, iso, resolution, led_rgb_value) + + print('switchting to cyan') + led_rgb_value = f"000-{brightness}-{brightness}" + s.write(str.encode(led_rgb_value)); response = s.readline(); print(response) + for i, resolution in enumerate(camera_resolutions): + print(f'resolution {i+1}/{len(camera_resolutions)}: {resolution}') + for ss in shutter_speeds: + for iso in iso_values: + take_image_picamera_opencv(ss, iso, resolution, led_rgb_value) + + t2 = time.perf_counter() + elapsed_time = round((t2-t1)/60,2) + print(f'series_rgb finished in {elapsed_time} min') + + +def take_image_series_twocolourchannels_per_led(brightness): + """Take image series. 
Two colours per series.""" + # All posibilities with R G B: RG, RB, GB + + print(f'switchting to red and green') + led_rgb_value = f"{brightness}-{brightness}-000" + s.write(str.encode(led_rgb_value)); response = s.readline(); print(response) + for ss in range(START_SHUTTER_SPEED, END_SHUTTER_SPEED+STEP_SHUTTER_SPEED, STEP_SHUTTER_SPEED): + take_image_picamera_opencv(ss, led_rgb_value) + + print('switchting to red and blue') + led_rgb_value = f"{brightness}-000-{brightness}" + s.write(str.encode(led_rgb_value)); response = s.readline(); print(response) + for ss in range(START_SHUTTER_SPEED, END_SHUTTER_SPEED+STEP_SHUTTER_SPEED, STEP_SHUTTER_SPEED): + take_image_picamera_opencv(ss, led_rgb_value) + + print('switchting to green and blue') + led_rgb_value = f"000-{brightness}-{brightness}" + s.write(str.encode(led_rgb_value)); response = s.readline(); print(response) + for ss in range(START_SHUTTER_SPEED, END_SHUTTER_SPEED+STEP_SHUTTER_SPEED, STEP_SHUTTER_SPEED): + take_image_picamera_opencv(ss, led_rgb_value) + +def takeimage_all_color_patterns(): + """Take images of all 20 color patterns including orientation """ + "The brightness is hard coded to 255 in the Arduino" + + t1 = time.perf_counter() + + print(f'take images for all 20 color patterns') + for number_of_color_pattern in range(1,21): + number_of_color_pattern = str(number_of_color_pattern).zfill(2) + print(f"color pattern no: {number_of_color_pattern}: {dict_color_pattern[number_of_color_pattern]}") + s.write(str.encode(f"color_pattern_{number_of_color_pattern}")) + + for i, resolution in enumerate(camera_resolutions): + print(f'resolution {i+1}/{len(camera_resolutions)}: {resolution}') + for ss in shutter_speeds: + for iso in iso_values: + take_image_picamera_opencv(ss, iso, resolution, number_of_color_pattern) + + t2 = time.perf_counter() + elapsed_time = round((t2-t1)/60,2) + print(f'series_rgb finished in {elapsed_time} min') + + +def display_image_with_text(img, shutter_speed, framenumber): + img = img.copy() # make copy of image and do not modify the original image + + font = cv.FONT_HERSHEY_SIMPLEX # font + fontScale = 1 # fontScale + color = (255, 255, 255) # Font colour in BGR + thickness = 1 # Line thickness in px + + # set text position + frame_width = int(img.shape[1]) + frame_height = int(img.shape[0]) + text_start_position_Y = int(round(frame_height*0.12)) # start position of text in pixels 12 % of frame height + text_linespacing = 50 # line spacing between two strings in pixels + + # set position in (x,y)-coordinated from top left corner. Bottom-left corner of the text string in the image. 
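+    # Illustrative sketch (hypothetical helper, not used by this script): the
+    # positions below follow the pattern pos_n = (x, y0 + (n-1)*spacing), i.e.
+    # def text_line_position(n, x, y0=text_start_position_Y, spacing=text_linespacing):
+    #     return (x, y0 + (n-1)*spacing)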
+    pos_1 = (int(frame_width/4), text_start_position_Y)  # start text at 1/4 of the image width
+    pos_2 = (int(frame_width/4), text_start_position_Y+text_linespacing)  # start text at 1/4 of the image width
+
+    # define text to display
+    text_line_1 = f"Shutterspeed: {shutter_speed} us"
+    text_line_2 = f"Frame: {framenumber}"
+
+    # put the text into the image
+    image_text_1 = cv.putText(img, text_line_1, pos_1, font,
+                              fontScale, color, thickness, cv.LINE_AA)
+    image_text_2 = cv.putText(img, text_line_2, pos_2, font,
+                              fontScale, color, thickness, cv.LINE_AA)
+
+    cv.imshow("Current Frame", img)  # display the image
+
+
+# Start Script
+
+# Create folder for saving the captured pictures
+now = datetime.now(); d1 = now.strftime("%Y-%m-%d %H-%M")
+path_cwd = os.getcwd()
+
+# path_saveFolder = path_cwd+r"/Solarradiation_"+d1
+path_saveFolder = path_cwd+r"/series_"+d1
+try:
+    os.mkdir(path_saveFolder)
+    lines = [f"CAMERA NR: {CAM_NR}", f"CAMERA DESCRIPTION: {CAM_DESCRIPTION}", f"CAMERA EAN: {CAM_EAN}",
+             f"\n#Camera settings:", f"SENSOR MODE: {SENSOR_MODE}",
+             f"FRAMERATE: {FRAMERATE} # frames per second",
+             f"AWB_MODE: {AWB_MODE}", f"AWB_GAINS (red, blue): {AWB_GAINS}",
+             f"Brightness: {BRIGHTNESS}", f"Contrast: {CONTRAST}",
+             f"EXPOSURE_MODE: {EXPOSURE_MODE}",
+             f"\nCOMMENT: {COMMENT}"]
+    with open(os.path.join(path_saveFolder, "log.txt"), "w") as f:
+        f.write("\n".join(lines))
+except OSError:
+    print("Error! Ending script. Try again in 1 minute")
+    quit()
+
+# Turn on LEDs
+s = serial.Serial("/dev/ttyACM0", 9600)  # name of the USB port may vary (here: ACM0)
+time.sleep(5)  # the Arduino resets when a serial connection is opened, so wait briefly
+
+
+
+# start capture series for different shutter speeds
+print('start capture series...')
+
+
+# s.write(str.encode("000-255-000")); response = s.readline(); print(response)
+# take_image_picamera_opencv(shutter_speed=200, led_rgb_value="000-255-000")
+
+
+# take image series for two colour channels per led
+# for brgns in range(50,260,50):
+#     brgns=str(brgns).zfill(3)
+#     take_image_series_twocolourchannels_per_led(brgns) # take image series for every color in the given brightness
+# take_image_series_twocolourchannels_per_led(255) # take image series for every color in the given brightness
+
+
+# # take image series for white colour
+# for brgns in range(50,260,50):
+#     brgns=f"{brgns}".zfill(3)
+#     ledcolour=f"{brgns}-{brgns}-{brgns}"
+#     take_image_series_onecolourchannel_per_led(ledcolour)
+# take_image_series_onecolourchannel_per_led("255-255-255")
+
+# --------------------------------------------------------------------------------------------------------------------------------------------
+# # 03.02.2022: Take images with multiple shutter speeds but at brightness 255 for determining colour falsification towards the edges of the image
+# take_image_series_rgb(255)
+# take_image_series_ymc(255)
+# take_image_series_onecolourchannel_per_led("255-255-255")
+
+# --------------------------------------------------------------------------------------------------------------------------------------------
+# # 26.02.2022: Take images with multiple shutter speeds and brightnesses for investigating the influence of solar radiation on the images
+# # take image series for ymc
+# for brgns in range(50,260,50):
+#     brgns=str(brgns).zfill(3)
+#     take_image_series_ymc(brgns) # take image series for every color in the given brightness
+# take_image_series_ymc(255) # take image series for every color in the given brightness
+
+# # take image series for rgb
+# for brgns in 
range(50,260,50): +# brgns=str(brgns).zfill(3) +# take_image_series_rgb(brgns) #Take Image Series for every color in given brightness +# take_image_series_rgb(255) #Take Image Series for every color in given brightness + +# # take image series for white colour +# for brgns in range(50,260,50): +# brgns=f"{brgns}".zfill(3) +# ledcolour=f"{brgns}-{brgns}-{brgns}" +# take_image_series_onecolourchannel_per_led(ledcolour) +# take_image_series_onecolourchannel_per_led("255-255-255") +# -------------------------------------------------------------------------------------------------------------------------------------------- +# 10.04.2022: Take images with multiple shutter speeds for investigating the influence of the solar radiation on the images +# take image series for rgb +take_image_series_rgb(255) #Take Image Series for every color in given brightness + +# take image series for ymc +take_image_series_ymc(255) #Take Image Series for every color in given brightness + +# take image series for white colour +take_image_series_onecolourchannel_per_led("255-255-255") +# -------------------------------------------------------------------------------------------------------------------------------------------- +# 07.03.2022: Take images of all 20 color patterns with birghtness 255 +takeimage_all_color_patterns() +# -------------------------------------------------------------------------------------------------------------------------------------------- + + +# End Script + +# Turn off LEDs +s.write(str.encode('000-000-000')); response = s.readline(); print(response) +s.close() #close serial port + +cv.destroyAllWindows() + +t1 = round(time.perf_counter()/60,2) + +print(f'Script finished in {t1} min') + + + + + + diff --git a/04_Spurerkennung/Kalibrierung/01_How-To-Start-Script_Calib.txt b/04_Spurerkennung/Kalibrierung/01_How-To-Start-Script_Calib.txt new file mode 100644 index 0000000..68bdee6 --- /dev/null +++ b/04_Spurerkennung/Kalibrierung/01_How-To-Start-Script_Calib.txt @@ -0,0 +1,2 @@ +#Author: Kenan Gömek +python object_size.py --image $IMAGE_PATH$ --width $OBJECT_WIDTH$ \ No newline at end of file diff --git a/04_Spurerkennung/Kalibrierung/object_size.py b/04_Spurerkennung/Kalibrierung/object_size.py new file mode 100644 index 0000000..85c0eb0 --- /dev/null +++ b/04_Spurerkennung/Kalibrierung/object_size.py @@ -0,0 +1,117 @@ +#modified by Kenan Gömek +# Source: https://pyimagesearch.com/2016/03/28/measuring-size-of-objects-in-an-image-with-opencv/ + +# import the necessary packages +from scipy.spatial import distance as dist +from imutils import perspective +from imutils import contours +import numpy as np +import argparse +import imutils +import cv2 + +def midpoint(ptA, ptB): + return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5) + +# construct the argument parse and parse the arguments +ap = argparse.ArgumentParser() +ap.add_argument("-i", "--image", required=True, + help="path to the input image") +ap.add_argument("-w", "--width", type=float, required=True, + help="width of the left-most object in the image (in inches)") +args = vars(ap.parse_args()) + +# load the image, convert it to grayscale, and blur it slightly +image = cv2.imread(args["image"]) +gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) +gray = cv2.GaussianBlur(gray, (7, 7), 0) + +# perform edge detection, then perform a dilation + erosion to +# close gaps in between object edges +edged = cv2.Canny(gray, 50, 100) +edged = cv2.dilate(edged, None, iterations=1) +edged = cv2.erode(edged, None, iterations=1) + +# find contours 
in the edge map +cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, + cv2.CHAIN_APPROX_SIMPLE) +cnts = imutils.grab_contours(cnts) + +# sort the contours from left-to-right and initialize the +# 'pixels per metric' calibration variable +(cnts, _) = contours.sort_contours(cnts) +pixelsPerMetric = None + +# loop over the contours individually +for c in cnts: + # if the contour is not sufficiently large, ignore it + if cv2.contourArea(c) < 100: + continue + + # compute the rotated bounding box of the contour + orig = image.copy() + box = cv2.minAreaRect(c) + box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box) + box = np.array(box, dtype="int") + + # order the points in the contour such that they appear + # in top-left, top-right, bottom-right, and bottom-left + # order, then draw the outline of the rotated bounding + # box + box = perspective.order_points(box) + cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 0), 2) + + # loop over the original points and draw them + for (x, y) in box: + cv2.circle(orig, (int(x), int(y)), 5, (0, 0, 255), -1) + + # unpack the ordered bounding box, then compute the midpoint + # between the top-left and top-right coordinates, followed by + # the midpoint between bottom-left and bottom-right coordinates + (tl, tr, br, bl) = box + (tltrX, tltrY) = midpoint(tl, tr) + (blbrX, blbrY) = midpoint(bl, br) + + # compute the midpoint between the top-left and top-right points, + # followed by the midpoint between the top-righ and bottom-right + (tlblX, tlblY) = midpoint(tl, bl) + (trbrX, trbrY) = midpoint(tr, br) + + # draw the midpoints on the image + cv2.circle(orig, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1) + cv2.circle(orig, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1) + cv2.circle(orig, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1) + cv2.circle(orig, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1) + + # draw lines between the midpoints + cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)), + (255, 0, 255), 2) + cv2.line(orig, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)), + (255, 0, 255), 2) + + # compute the Euclidean distance between the midpoints + dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY)) + dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY)) + # if the pixels per metric has not been initialized, then + # compute it as the ratio of pixels to supplied metric + # (in this case, inches) + if pixelsPerMetric is None: + pixelsPerMetric = dB / args["width"] + print(f"dB: {dB}") + w_kg = args["width"] + print(f"width: {w_kg}") + print(f"pixelsPerMetric: {pixelsPerMetric}") + + # compute the size of the object + dimA = dA / pixelsPerMetric + dimB = dB / pixelsPerMetric + # draw the object sizes on the image + cv2.putText(orig, "{:.2f} mm".format(dimA), + (int(tltrX - 15), int(tltrY - 10)), cv2.FONT_HERSHEY_SIMPLEX, + 0.65, (255, 255, 255), 2) + cv2.putText(orig, "{:.2f} mm".format(dimB), + (int(trbrX + 10), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX, + 0.65, (255, 255, 255), 2) + # show the output image + cv2.imshow("Image", orig) + cv2.waitKey(0) \ No newline at end of file diff --git a/04_Spurerkennung/Lanedetection_development_V02_findContours.py b/04_Spurerkennung/Lanedetection_development_V02_findContours.py new file mode 100644 index 0000000..c54797a --- /dev/null +++ b/04_Spurerkennung/Lanedetection_development_V02_findContours.py @@ -0,0 +1,454 @@ +# Creation Date: 01.03.2022 +# Author: Kenan Gömek +# This program is for the development of the lane detection algorithm on a windows and Linux machine +# It 
uses images, which were captured previously +# V02 detects the lane by using the grayscale-image of the rgb-image + +from turtle import position +import cv2 as cv +import numpy as np +import os +import time + +import platform + +# input +if platform.system() == "Windows": + folder_path=r"images_input" +if platform.system() == "Linux": + folder_path=r"/home/pi/Desktop/images_input" + +# Parameters +pixels_per_mm = 32/24.25 #[px/mm] for 120 mm camera height for resolution: 416x320 +# pixels_per_mm = 71/24.25 #[px/mm] for 120 mm camera height for resolution: 416x320 +# pixels_per_mm = 107/24.25 #[px/mm] for 120 mm camera height for resolution: 640x480 + +# Offset Camera Sensor in Scooty according to Scooty-KS +x_offset_camera_mm = 100 # [mm] +y_offset_camera_mm = 50 # [mm] +x_offset_camera_px = x_offset_camera_mm*pixels_per_mm # [px] +y_offset_camera_px = y_offset_camera_mm*pixels_per_mm # [px] + +# image parameters: width > height +image_height = 320 # shape [0] +image_width = 416 # shape[1] +# calculate center of image +[x_0, y_0] = np.array([image_width/2, image_height/2], dtype=np.uint16) + + +threshold_color_detection = 60 # values under this will not be considered as active leds in each color channel + +# Parameters for LED Detection +minDiameter_mm = 3.75 # [mm] minimum diameter of detected blob/LED. Must be minimum >0 ! +# maxDiameter_mm = 9 # [mm] maximum diameter of detected blob/LED + +# Define color numbers to identify the color channels in the matrix with all detected LEDs. No string, because numpy array should stay uint16 +color_number_off = 0 +color_number_red = 1 +color_number_green = 2 +color_number_blue = 3 +color_number_yellow = 4 +color_number_magenta = 5 +color_number_cyan = 6 +color_number_white = 7 + +# Parameters for grayscale to binary conversion +binary_threshold = 15 # determined by testing and application + # the higher threshold is, the smaller the diameter of the led, because more high values are extracted +binary_maxval = 255 # values ofer threshold will be set to maxval + +# Parameters for line fitting for lane construction +param = 0 # not used for DIST_L2 +reps = 0.001 # Sufficient accuracy for the radius (distance between the coordinate origin and the line). +aeps = 0.001 # Sufficient accuracy for the angle. + +show_opencv_window = True # show opencv window +draw_opencv = True # draw lane and so on + +print_additional_info = True + +# calculations before start +# Filter blobs by Area --> not implemented anymore, because no need, because detection is good and less time for calculation needed +# more than good trade off! +minDiameter_px = minDiameter_mm*pixels_per_mm # [px] minimum diameter of detected blob/LED +# maxDiameter_px = maxDiameter_mm*pixels_per_mm # [px] maximum diameter of detected blob/LED +minArea_px2 = np.pi/4*minDiameter_px**2 # min Area of a blob in px^2 +# maxArea_px2 = np.pi/4*maxDiameter_px**2 +# minArea = minArea_px2 # min Area of a blob in px^2 +# params.maxArea = maxArea_px2 # max Area of a blob in px^2. +# reasons for not filtering maxArea: motion blur + rolling shutter --> larger Area + + +def points_trafo(detected_LEDs, alpha_rad, dx, dy): + """Tranfsform points of LED to lane in KS-LED""" + detected_LEDs_trafo = detected_LEDs.copy() # copy, becuase else it is only a pointer + detected_LEDs_trafo = detected_LEDs_trafo.astype(np.int16) # avoid integer overflow + x_pnts = detected_LEDs_trafo[:,0] + y_pnts = detected_LEDs_trafo[:,1] + + # Translation + x1 = x_pnts-dx-x_0 + x_trafo = x1 + y1 = y_pnts-dy-y_0 + y_trafo = y1 + + # Rotation. 
Winkel Sensor im UZS, also negativ zu mathematischer definiton --> passive Drehung + # cos(-x)=cos(x) + # sin(-x)=-sin(x) --> -sin(-x) = - -sin(x) = sin(x) + # wegen besserer verständlichkeit nicht verinfachung angewendet + x_trafo = np.cos(-alpha_rad)*x1-np.sin(-alpha_rad)*y1 + detected_LEDs_trafo[:,0] = x_trafo + y_trafo = np.sin(-alpha_rad)*x1+np.cos(-alpha_rad)*y1 + detected_LEDs_trafo[:,1] = y_trafo + + #sort points along lane: x_2, y_2 -axis (KS_LED) + detected_LEDs_trafo = detected_LEDs_trafo[detected_LEDs_trafo[:, 0].argsort(kind='quicksort')] + return detected_LEDs_trafo + +def construct_lane(detected_LEDs, img_bgr): + """construct the lane""" + # This function is partially commented in german, because higher math is used + # clearer what is trying to be achieved + + # get points + # xy_pnts = detected_LEDs[:,0:2] + # x_pnts = detected_LEDs[:,0] + # y_pnts = detected_LEDs[:,1] + + # approach 2: + # fit line through centers of LEDs in KS_0 + # DIST_L": the simplest and the fastest least-squares method: the simple euclidean distance + [dx, dy, x_2, y_2] = cv.fitLine(detected_LEDs[:,0:2], cv.DIST_L2, param, reps, aeps) + # x_2, y_2: point on the line --> mean of leds --> point in the middle of the leds + # x2, y2: same as: mean_of_leds = np.mean([x_pnts, y_pnts], 1) + + alpha_rad = np.arctan2(dy, dx) # calculate angle of line + alpha = np.arctan2(dy, dx)*180/np.pi # calculate angle of line + # print(f"Lane: dx: {dx}, dy:{dy}, x2:{x_2}, y2:{y_2}, alpha:{alpha}°") + if print_additional_info: + alpha_print = alpha[0] + alpha_print = float("{0:.2f}".format(alpha_print)) + print(f"Alpha: {alpha_print}°") + + # get smallest distance to point an line + # Berechnung nach: Repetitorium Höhere Mathematik, Wirth + # Gerade: x = a+ t*b + # Punkt : OP = p + # d = abs(b x (p-a))/(abs(b)) + # info: np.array()[:,0] --> gets only array with 1 dimensions with desired values + p = np.array([x_0, y_0]) + a = np.array([x_2, y_2])[:,0] + b = np.array([np.cos(alpha_rad), np.sin(alpha_rad)])[:,0] # Richtungsvektor + c = p-a + + # Betrag von Vektor: np.linalg.norm(vec) + cross= np.cross(b, c) + d = np.linalg.norm(cross)/np.linalg.norm(b) # distance [px] + #print(f"d: {round(d,2)}") + + # Fußpunkt x_FP=(X_LED, Y_LED) + t_0_dot = np.dot(c, b) + t_0_norm = (np.linalg.norm(b)**2) + t_0 = t_0_dot/t_0_norm + [x_LED, y_LED] = (a+t_0*b) + if print_additional_info: + # convert float32, round and prepare for printing string + x_LED_print = float("{0:.2f}".format(x_LED)) + y_LED_print = float("{0:.2f}".format(y_LED)) + print(f"x_LED: {x_LED_print} [px], y_LED: {y_LED_print} [px]") + + # Abstand (dx_LED, dy_LED) Fußpunkt x_FP zu KS_0 (P) + dx_LED = x_LED - x_0 + dx_LED_mm = dx_LED*(1/pixels_per_mm) + dy_LED = y_LED - y_0 + dy_LED_mm = dy_LED*(1/pixels_per_mm) + if print_additional_info: + # convert float32, round and prepare for printing string + dx_LED_print = float("{0:.2f}".format(dx_LED)) + dy_LED_print = float("{0:.2f}".format(dy_LED)) + dx_LED_mm_print = float("{0:.2f}".format(dx_LED_mm)) + dy_LED_mm_print = float("{0:.2f}".format(dy_LED_mm)) + print(f"dx_LED: {dx_LED_print} [px] , dy_LED: {dy_LED_print} [px]") + print(f"dx_LED: {dx_LED_mm_print} [mm] , dy_LED: {dy_LED_mm_print} [mm]") + + # Abstand (dx, dy) Fußpunkt x_FP von Bildmitte zu KS_Scooty + # Diese Werte zurückgeben + dx_LED_scooty = x_LED - x_0 + x_offset_camera_px + dx_LED_scooty_mm = dx_LED_scooty*(1/pixels_per_mm) + dy_LED_scooty = y_LED - y_0 + y_offset_camera_px + dy_LED_scooty_mm = dy_LED_scooty*(1/pixels_per_mm) + if print_additional_info: + 
print(f"dx_LED_scooty: {round(dx_LED_scooty,2)} [px] , dy_LED_scooty: {round(dy_LED_scooty,2)} [px]") + print(f"dx_LED_scooty: {round(dx_LED_scooty_mm,2)} [mm] , dy_LED_scooty: {round(dy_LED_scooty_mm,2)} [mm]") + + # Punkte Trafo, um sortierte position der LEDs entlang Spur zu erhalten + # Bei normal detected kann bei vertikaler LED zb Fehler entstehen und dann muster: 211323233 -> daher mit dieser sortierten weitermachen + detected_LEDs_KS_LED = points_trafo(detected_LEDs, alpha_rad, dx_LED, dy_LED) + if print_additional_info: + print(f"Detected LEDs in KS_LED:(x2, y2):\n {detected_LEDs_KS_LED}") + + #----------------------------------- + # draw useful lines and points + + # draw lane line + if draw_opencv: + pt_0 = (a+b*np.array([-300, -300])).astype(np.int32) + pt_1 = (a+b*np.array([300, 300])).astype(np.int32) + #print(f"pt_0: {pt_0}, pt_1: {pt_1}") + cv.line(img_bgr, pt_0, pt_1, (255,255,255),1) # draw lane + + # draw dx dy + cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_LED)), (0,0,255), 2) # shortest distance from KS_0 to KS_LED --> Lot + # cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_0)), (0,0,255), 2) # only dx + # cv.line(img_bgr, (int(x_LED), int(y_0)), (int(x_LED), int(y_LED)), (0,0,255), 2) # only dy + + #draw additional points + cv.circle(img_bgr, (int(x_2), int(y_2)), 5,(255,128,255),-1) #pink. Center of points + #cv.putText(img_bgr, '(x2, y2)',(int(x_2)+5, int(y_2)-5), cv.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), cv.LINE_AA) + cv.circle(img_bgr, (int(x_LED), int(y_LED)), 5,(170,255,0),-1) # lime green. Fußpunkt + + if show_opencv_window: + cv.imshow("Lane", img_bgr) + + return dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED + +def convert_rgb_to_grayscale_average(image_bgr): + """This function converts the RGB image into an grayscale image. + Algorithm: Average: Y = (R+G+B)/3""" + + # convert dtype to prevent integer overflow while addition + image_bgr = image_bgr.astype(np.uint16, copy=False) + image_gray = (image_bgr[:,:,0]+image_bgr[:,:,1]+image_bgr[:,:,2])/3 # add values / do conversion + image_gray = image_gray.astype(np.uint8, copy=False) # convert back to uint8 + + return image_gray + + +def get_color_of_leds(matrix_of_LEDs, image_bgr): + """Determine color of LEDs at positions and add to matrix""" + # is image_r[y_pos, x_pos] = image_bgr[y_pos,x_pos, 2] ? --> yes. No need to split the color channels. 
+ + offset = 1 # half of length from rectangle which is going to be used to determine the color around the middle point of the blob/led + # offset=0 --> only the value from the middle point of the blob/led + # offset=1 --> 9 values, offset=2-->25 values + + for led in matrix_of_LEDs: + x_pos = led[0] # uint16 + y_pos = led[1] # uint16 + + # get values of color channels in region around middle point of blob/led: + # +1 at stop index, because it is not inclusive + region_around_blue_led = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, 0] # uint8 + region_around_green_led = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, 1] # uint8 + region_around_red_led = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, 2] # uint8 + + # average of the values + # convert dtype to prevent integer overflow while addition + region_around_red_led = region_around_red_led.astype(np.uint16, copy=False) + region_around_green_led = region_around_green_led.astype(np.uint16, copy=False) + region_around_blue_led = region_around_blue_led.astype(np.uint16, copy=False) + # sum all elements in matrix and divide with number of elements + number_of_elements= region_around_blue_led.size + value_of_red_led = region_around_red_led.sum()/number_of_elements # float64, if not integer result + value_of_green_led = region_around_green_led.sum()/number_of_elements # float64, if not integer result + value_of_blue_led = region_around_blue_led.sum()/number_of_elements # float64, if not integer result + + # determine which leds are active: + # if value > threshold --> led is active + status_blue_led = False; status_green_led = False; status_red_led = False + if value_of_blue_led > threshold_color_detection: + status_blue_led = True + if value_of_green_led > threshold_color_detection: + status_green_led = True + if value_of_red_led > threshold_color_detection: + status_red_led = True + + # determine color by checking the cases: + # case 1: red + if status_blue_led==False and status_green_led==False and status_red_led==True: + color = color_number_red + # case 2: green + elif status_blue_led==False and status_green_led==True and status_red_led==False: + color = color_number_green + # case 3: blue + elif status_blue_led==True and status_green_led==False and status_red_led==False: + color = color_number_blue + # case 4: yellow = red + green + elif status_blue_led==False and status_green_led==True and status_red_led==True: + color = color_number_yellow + # case 5: magenta = red + blue + elif status_blue_led==True and status_green_led==False and status_red_led==True: + color = color_number_magenta + # case 6: cyan = green + blue + elif status_blue_led==True and status_green_led==True and status_red_led==False: + color = color_number_cyan + # case 7: white = red + green + blue + elif status_blue_led==True and status_green_led==True and status_red_led==True: + color = color_number_white + # case 8: led not active + # this case can not occur, because no inactive led can be detected from the implemented blob-algorithm in detect_LED_positions_in_grayscale + else: + color = color_number_off + + # fill matrix with color + led[2] = color # uint16 + + return matrix_of_LEDs + +def get_position_of_LEDs_contours(image_gray, image_bgr): + # create binary image + ret, image_binary = cv.threshold(image_gray, binary_threshold, binary_maxval, cv.THRESH_BINARY) + + # find contours + contour_retrieval_algorithm = cv.RETR_EXTERNAL # retrieves only the extreme outer contours + contours, hierarchy = 
cv.findContours(image_binary, contour_retrieval_algorithm, cv.CHAIN_APPROX_SIMPLE) + + # analyse contours + number_of_detected_contours = len(contours) + if number_of_detected_contours != 0: + # centroid of contours + # Pre-allocate matrix for numpy + number_of_rows = 0 + number_of_columns = 3 + position_of_leds = np.zeros((number_of_rows, number_of_columns), dtype=np.uint16) #empty: [] + + if draw_opencv: + image_bgr_contours = image_bgr.copy() # create copy of image + # copy is needed to draw on, because else the color of + # the circle is detected als LED-Color + + number_of_detected_LEDs = 0 + for i, cnt in enumerate(contours): + area = cv.contourArea(cnt) + # diameter = 2*np.sqrt(area/np.pi) + # diameter_mm = diameter*(1/pixels_per_mm) + # print(f"area: {area} [px^2], diameter: {diameter} [px], diamter: {diameter_mm} [mm]") + + # Filter contours by area. minimum Area needs to be at least >0 ! + if area > minArea_px2: + M = cv.moments(cnt) + number_of_detected_LEDs += 1 + # prevent zero division + if M['m00']==0: + cx = 0 + cy = 0 + else: + cx = int(M['m10']/M['m00']) + cy = int(M['m01']/M['m00']) + #print(cx, cy) + # add positions to matrix + x_pos = int(cx) # x positon + y_pos = int(cy) # y position + position_of_leds = np.vstack((position_of_leds, \ + np.array([x_pos, y_pos, color_number_off], dtype=np.uint16))) # vstack: row wise + + # draw centroids + if draw_opencv: + radius = 2 + color = (255,255,255) + thickness = -1 # filled + cv.circle(image_bgr_contours,(cx,cy), radius, color, thickness) + + if number_of_detected_LEDs != 0: + if print_additional_info: + print(f"detected LEDs: {number_of_detected_LEDs}") + + if draw_opencv: + # draw contours + contours_to_pass = -1 # pass all contours + color_of_contour = (255,255,255) + line_thickness = 1 + cv.drawContours(image_bgr_contours, contours, contours_to_pass, color_of_contour, line_thickness) + + if show_opencv_window: + cv.imshow("binary", image_binary) + cv.imshow("Contours", image_bgr_contours) + + return position_of_leds + else: + if print_additional_info: + print(f"No LEDs were detected") + return None + + else: + if print_additional_info: + print(f"No contours were detected") + return None + +def detect_position_of_LEDs(image_bgr): + + # convert rgb to grayscale + image_gray = convert_rgb_to_grayscale_average(image_bgr) + if show_opencv_window: + cv.imshow("grayscale", image_gray) + + # get position of leds + position_of_LEDs = get_position_of_LEDs_contours(image_gray, image_bgr) + # position_of_LEDs = None + + return position_of_LEDs + + + +def lane_detection(image_bgr): + # Detect LEDs + if print_additional_info: + print(f"Detect LEDs and color:") + position_of_LEDs = detect_position_of_LEDs(image_bgr) + # detected_LEDs = None # only that following code is not being executet for development + + # Get color of leds + if position_of_LEDs is not None: + detected_LEDs = get_color_of_leds(position_of_LEDs, image_bgr) + # print result + if print_additional_info: + print(f"Detected LEDs in KS_Sensor (x,y):\n{detected_LEDs}") + else: + detected_LEDs = None + + + # Contruct lane + if detected_LEDs is not None: + if print_additional_info: + print("\nContruct lane with consideration of camera offset:") + dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED = \ + construct_lane(detected_LEDs, image_bgr) + return detected_LEDs + else: + return None + + +def main(): + filenames_of_images = [f for f in os.listdir(folder_path) if f.endswith('.png')] + + + for i, filename_of_image in enumerate(filenames_of_images): + if 
print_additional_info:
+            print(f"image {i+1}/{len(filenames_of_images)}:{filename_of_image}")
+        full_path_of_image = os.path.join(folder_path, filename_of_image)
+        image_bgr = cv.imread(full_path_of_image, cv.IMREAD_COLOR)  # load original image
+
+        start_processing = time.perf_counter()
+
+        detected_LEDs = lane_detection(image_bgr)
+
+        end_processing = time.perf_counter()
+        time_processing = end_processing-start_processing
+        time_processing = time_processing*1000
+        time_processing = round(time_processing, 2)
+        print(f'processing time: {time_processing} ms')
+
+        # print(f"_____________________________________")
+        # show images:
+        # cv.imshow("Blue channel", image_b)
+        # cv.imshow("Green channel", image_g)
+        # cv.imshow("Red channel", image_r)
+
+        if show_opencv_window:
+            pressed_key = cv.waitKey(0) & 0xff  # display and wait if a key is pressed and then continue
+            if pressed_key == ord('q'):
+                exit()
+            cv.destroyAllWindows()
+
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/04_Spurerkennung/dev/Lanedetection_development_V01.py b/04_Spurerkennung/dev/Lanedetection_development_V01.py
new file mode 100644
index 0000000..2e7582f
--- /dev/null
+++ b/04_Spurerkennung/dev/Lanedetection_development_V01.py
@@ -0,0 +1,254 @@
+# Creation Date: 01.03.2022
+# Author: Kenan Gömek
+# This program is for the development of the lane detection algorithm on a Windows machine.
+# It uses images which were captured previously.
+
+import cv2 as cv
+import numpy as np
+import os
+
+# input
+folder_path=r"U:\bwsyncshare\images_input"
+
+# Parameters
+pixels_per_mm = 71/24.25  # [px/mm] for 120 mm camera height at resolution 416x320
+# pixels_per_mm = 107/24.25  # [px/mm] for 120 mm camera height at resolution 640x480
+
+cut_off_brightness_grayscale = 45  # pixels with a value less than this will be set to zero (black)
+
+# Parameters for HoughCircles
+dp = 1  # Inverse ratio of the accumulator resolution to the image resolution. For example, if dp=1, the accumulator has the same resolution as the input image. If dp=2, the accumulator has half the width and height.
+minDist_mm = 10  # [mm] minimal distance between two circles
+minDist_px = int(minDist_mm*pixels_per_mm)  # [px] Minimum distance between the centers of the detected circles. If the parameter is too small, multiple neighbouring circles may be falsely detected in addition to a true one. If it is too large, some circles may be missed.
+minRadius_mm = 5  # [mm] minimum radius of a circle
+minRadius_px = int(minRadius_mm*pixels_per_mm)  # [px] Minimum circle radius.
+maxRadius_mm = 7  # [mm] maximum radius of a circle
+maxRadius_px = int(maxRadius_mm*pixels_per_mm)  # [px] Maximum circle radius. If <= 0, uses the maximum image dimension. If < 0, returns centers without finding the radius.
+
+param1 = 100  # previously 30. First method-specific parameter. In case of HOUGH_GRADIENT it is the higher of the two thresholds passed to the Canny edge detector (the lower one is half of it).
+              # If circles/LEDs should be detected at low shutter speeds, then lower this value.
+              # Upper threshold for the internal Canny edge detector:
+              # "gradient value between dark and white".
+param2 = 5  # previously 12. Second method-specific parameter. In case of HOUGH_GRADIENT it is the accumulator threshold for the circle centers at the detection stage. The smaller it is, the more false circles may be detected. Circles corresponding to larger accumulator values will be returned first.
+            # By increasing this threshold value, we can ensure that only the best circles, corresponding to larger accumulator values, are returned.
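For reference, a minimal, self-contained sketch of how the parameters above feed cv.HoughCircles; the synthetic frame and the printed fields are illustrative only and not part of the detection scripts:

    import cv2 as cv
    import numpy as np

    frame = np.zeros((320, 416), dtype=np.uint8)          # synthetic stand-in for one colour channel
    cv.circle(frame, (208, 160), 17, 255, thickness=-1)   # one bright disc as a fake LED

    circles = cv.HoughCircles(frame, cv.HOUGH_GRADIENT, dp=1, minDist=29,
                              param1=100, param2=5, minRadius=14, maxRadius=20)
    if circles is not None:
        for x, y, r in np.uint16(np.round(circles))[0, :]:
            print(f"centre: ({x}, {y}), radius: {r} px")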
+
+# Define color numbers to identify the color channels in the matrix with all detected LEDs.
+# No strings, because the numpy array should stay uint16.
+color_number_red = 1
+color_number_green = 2
+color_number_blue = 3
+
+def preprocess_grayscale_image_1(image_gray_copy, color_channel):
+    # Set all pixels with a value less than "cut_off_brightness_grayscale" to zero
+    image_gray_cutoff = image_gray_copy.copy()
+    image_gray_cutoff[image_gray_cutoff < cut_off_brightness_grayscale]=0
+    cv.imshow(f"{color_channel} channel cutoff", image_gray_cutoff[150:,:])
+
+    preprocessed_image = image_gray_cutoff
+    return preprocessed_image
+
+def preprocess_grayscale_image_2(image_gray_copy, color_channel):
+    # Set all pixels with a value less than "cut_off_brightness_grayscale" to zero.
+    # This prevents the false detection of color channels which are not intended to be detected.
+    image_gray_cutoff = image_gray_copy.copy()
+    image_gray_cutoff[image_gray_cutoff < cut_off_brightness_grayscale]=0
+    # cv.imshow(f"{color_channel} channel grayscale cutoff", image_gray_cutoff[150:,:])
+
+    # For better accuracy, binary images are used before finding contours --> apply threshold
+    image_gray_binary = image_gray_cutoff
+    perc = np.percentile(image_gray_binary, 95)  # calculate the 95 % percentile
+    ret, tresh = cv.threshold(image_gray_binary, perc, 255, cv.THRESH_BINARY)  # set pixels above the specified percentile to 255, the rest to 0
+
+    preprocessed_image = tresh
+    return preprocessed_image
+
+def preprocess_grayscale_image_3(image_gray_copy, color_channel):
+    # Set all pixels with a value less than "cut_off_brightness_grayscale" to zero.
+    # This prevents the false detection of color channels which are not intended to be detected.
+    image_gray_cutoff = image_gray_copy.copy()
+    image_gray_cutoff[image_gray_cutoff < cut_off_brightness_grayscale]=0
+    perc = np.percentile(image_gray_cutoff, 95)  # calculate the 95 % percentile
+    image_gray_cutoff[image_gray_cutoff < perc]=0
+    cv.imshow(f"{color_channel} channel grayscale cutoff", image_gray_cutoff[150:,:])
+
+    # For better accuracy, binary images are used before finding contours --> apply threshold
+    image_gray_binary = image_gray_cutoff
+    perc = np.percentile(image_gray_binary, 95)  # calculate the 95 % percentile
+    ret, tresh = cv.threshold(image_gray_binary, perc, 255, cv.THRESH_BINARY)  # set pixels above the specified percentile to 255, the rest to 0
+
+    preprocessed_image = image_gray_cutoff  # note: this variant returns the percentile cutoff image, not the binary image "tresh"
+    return preprocessed_image
+
+def preprocess_grayscale_image_4_cannyedge(image_gray_copy, color_channel):
+    # Detect edges with the Canny edge detector instead of thresholding
+    t_lower = 1
+    t_upper = 5
+
+    edge = cv.Canny(image_gray_copy, t_lower, t_upper)
+    cv.imshow(f"{color_channel} channel edge", edge[150:,:])
+
+    preprocessed_image = edge
+    return preprocessed_image
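For reference, the effect of the 95 % percentile binarisation used in preprocess_grayscale_image_2 on a mostly dark frame; a minimal sketch with made-up values, not part of the scripts:

    import cv2 as cv
    import numpy as np

    gray = np.zeros((320, 416), dtype=np.uint8)
    gray[100:120, 200:220] = 200                   # one bright LED-like patch
    gray[gray < 45] = 0                            # cut_off_brightness_grayscale
    perc = np.percentile(gray, 95)                 # 0 here, since far more than 5 % of the pixels are black
    _, binary = cv.threshold(gray, perc, 255, cv.THRESH_BINARY)
    print(binary.max(), np.count_nonzero(binary))  # 255 400 --> only the patch survives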
+
+def detect_LEDs_in_color_channel(image_gray, color_channel, image_gray_copy, image_bgr_copy):
+
+    # preprocessed_gray_image = preprocess_grayscale_image_1(image_gray_copy=image_gray_copy, color_channel=color_channel)
+    preprocessed_gray_image = preprocess_grayscale_image_2(image_gray_copy=image_gray_copy, color_channel=color_channel)
+    # preprocessed_gray_image = preprocess_grayscale_image_3(image_gray_copy=image_gray_copy, color_channel=color_channel)
+    # preprocessed_gray_image = preprocess_grayscale_image_4_cannyedge(image_gray_copy=image_gray_copy, color_channel=color_channel)
+    # preprocessed_gray_image = image_gray_copy
+
+    detected_LEDs = cv.HoughCircles(preprocessed_gray_image, cv.HOUGH_GRADIENT, dp=dp, minDist=minDist_px,
+                                    param1=param1, param2=param2, minRadius=minRadius_px, maxRadius=maxRadius_px)
+
+    # specify the color number for adding to the matrix of detected LEDs
+    if color_channel == "blue":
+        color_number = color_number_blue
+    elif color_channel == "green":
+        color_number = color_number_green
+    elif color_channel == "red":
+        color_number = color_number_red
+
+    # check if at least one circle was found in the image
+    if detected_LEDs is not None:
+        detected_LEDs = np.uint16(np.round(detected_LEDs))  # convert the (x, y) coordinates and radius of the circles to integers
+        detected_LEDs = detected_LEDs[0,:]
+        detected_LEDs = np.hstack((detected_LEDs, np.full((detected_LEDs.shape[0],1), color_number, dtype=np.uint16)))
+        # matrix with columns: x, y, r, color number
+        number_of_detected_LEDs = detected_LEDs.shape[0]
+        print(f"detected {color_channel} LEDs: {number_of_detected_LEDs}")
+
+        # parameters for drawing
+        line_thickness = 1
+        circle_color = (0,255,0)
+        vertex_offset = 2
+        rectangle_color = (0,128,255)  # B G R
+        for (x, y, r, cn) in detected_LEDs:
+            print(f"x:{x} px, y:{y} px, r:{r} px, r:{round(r*1/(pixels_per_mm),2)} mm, D: {round(2*r*1/(pixels_per_mm),2)} mm, color: {color_channel}")
+            cv.circle(image_bgr_copy, (x, y), r, circle_color, thickness=line_thickness)  # draw the detected circumference of the circle
+            cv.circle(image_gray_copy, (x, y), r, circle_color, thickness=line_thickness)  # draw the detected circumference of the circle
+            cv.circle(preprocessed_gray_image, (x, y), r, circle_color, thickness=1)  # draw the detected circumference of the circle
+            cv.rectangle(img=image_bgr_copy, pt1=(x-vertex_offset, y-vertex_offset), pt2=(x+vertex_offset, y+vertex_offset), \
+                color=rectangle_color, thickness=cv.FILLED)
+            cv.rectangle(img=image_gray_copy, pt1=(x-vertex_offset, y-vertex_offset), pt2=(x+vertex_offset, y+vertex_offset), \
+                color=rectangle_color, thickness=cv.FILLED)
+            cv.rectangle(img=preprocessed_gray_image, pt1=(x-vertex_offset, y-vertex_offset), pt2=(x+vertex_offset, y+vertex_offset), \
+                color=rectangle_color, thickness=cv.FILLED)
+
+        cv.imshow(f"{color_channel} channel binary", preprocessed_gray_image[150:,:])
+
+        return detected_LEDs
+    else:
+        print(f"No {color_channel} LEDs were found in the image")
+        return None
+
+
+def detect_blue_LEDs(image_colorchannel_gray, image_colorchannel_gray_copy, image_bgr_copy):
+    color_channel = "blue"
+    detected_LEDs_blue = detect_LEDs_in_color_channel(image_gray=image_colorchannel_gray, color_channel=color_channel, \
+        image_gray_copy=image_colorchannel_gray_copy, image_bgr_copy=image_bgr_copy)
+
+    if detected_LEDs_blue is not None:
+        return detected_LEDs_blue
+    else:
+        return None
+
+def detect_green_LEDs(image_colorchannel_gray, image_colorchannel_gray_copy, image_bgr_copy):
+    color_channel = "green"
+    detected_LEDs_green = detect_LEDs_in_color_channel(image_gray=image_colorchannel_gray, color_channel=color_channel, \
+        image_gray_copy=image_colorchannel_gray_copy, image_bgr_copy=image_bgr_copy)
+    if detected_LEDs_green is not None:
+        return detected_LEDs_green
+    else:
+        return None
+
+def detect_red_LEDs(image_colorchannel_gray, image_colorchannel_gray_copy, image_bgr_copy):
+    color_channel = "red"
+    detected_LEDs_red = detect_LEDs_in_color_channel(image_gray=image_colorchannel_gray, color_channel=color_channel, \
image_gray_copy=image_colorchannel_gray_copy, image_bgr_copy=image_bgr_copy) + if detected_LEDs_red is not None: + return detected_LEDs_red + else: + return None + +def detect_LEDs(image_b, image_g, image_r, image_b_copy, image_g_copy, image_r_copy, image_bgr_copy): + detected_LEDs_blue = detect_blue_LEDs(image_colorchannel_gray=image_b, image_colorchannel_gray_copy=image_b_copy, image_bgr_copy=image_bgr_copy) + detected_LEDs_green = detect_green_LEDs(image_colorchannel_gray=image_g, image_colorchannel_gray_copy=image_g_copy, image_bgr_copy=image_bgr_copy) + detected_LEDs_red = detect_red_LEDs(image_colorchannel_gray=image_r, image_colorchannel_gray_copy=image_r_copy, image_bgr_copy=image_bgr_copy) + + # check the cases: + # case 1: r + if detected_LEDs_blue is None and detected_LEDs_green is None and detected_LEDs_red is not None: + detected_LEDs = detected_LEDs_red + # case 2: g + elif detected_LEDs_blue is None and detected_LEDs_green is not None and detected_LEDs_red is None: + detected_LEDs = detected_LEDs_green + # case 3: b + elif detected_LEDs_blue is not None and detected_LEDs_green is None and detected_LEDs_red is None: + detected_LEDs = detected_LEDs_blue + # case 4: y = r+g + elif detected_LEDs_blue is None and detected_LEDs_green is not None and detected_LEDs_red is not None: + detected_LEDs_all = np.vstack((detected_LEDs_red, detected_LEDs_green)) + detected_LEDs = detected_LEDs_all + # case 5: m = r+b + elif detected_LEDs_blue is not None and detected_LEDs_green is None and detected_LEDs_red is not None: + detected_LEDs_all = np.vstack((detected_LEDs_red, detected_LEDs_blue)) + detected_LEDs = detected_LEDs_all + # case 6: c = g+b + elif detected_LEDs_blue is not None and detected_LEDs_green is not None and detected_LEDs_red is None: + detected_LEDs_all = np.vstack((detected_LEDs_green, detected_LEDs_blue)) + detected_LEDs = detected_LEDs_all + # case 7: w = r+g+b + elif detected_LEDs_blue is not None and detected_LEDs_green is not None and detected_LEDs_red is not None: + detected_LEDs_all = np.vstack((detected_LEDs_red, detected_LEDs_green, detected_LEDs_blue)) + detected_LEDs = detected_LEDs_all + + return detected_LEDs + +def lane_detection(image_b, image_g, image_r, image_b_copy, image_g_copy, image_r_copy, image_bgr_copy): + # Detect LEDs + detected_LEDs = detect_LEDs(image_b, image_g, image_r, image_b_copy, image_g_copy, image_r_copy, image_bgr_copy) + return detected_LEDs + + +def main(): + filenames_of_images = [f for f in os.listdir(folder_path) if f.endswith('.png')] + + for i, filename_of_image in enumerate(filenames_of_images): + print(f"image:{filename_of_image}") + full_path_of_image = os.path.join(folder_path, filename_of_image) + image_bgr = cv.imread(full_path_of_image, cv.IMREAD_COLOR) # load original image + image_b,image_g,image_r = cv.split(image_bgr) # Split colour channels and get grayscale images + + # create copy of images, to draw on them + image_bgr_copy = image_bgr.copy() + image_b_copy = image_b.copy() + image_g_copy = image_g.copy() + image_r_copy = image_r.copy() + + detected_LEDs = lane_detection(image_b, image_g, image_r, image_b_copy, image_g_copy, image_r_copy, image_bgr_copy) + print(detected_LEDs) + + + print(f"_____________________________________") + # show images: only region of interest + cv.imshow("Original image", image_bgr[150:,:]) + cv.imshow("All detected LEDs", image_bgr_copy[150:,:]) + + cv.imshow("Blue channel", image_b[150:,:]) + cv.imshow("Blue channel detected", image_b_copy[150:,:]) + + cv.imshow("Green channel", 
image_g[150:,:]) + cv.imshow("Green channel detected", image_g_copy[150:,:]) + + cv.imshow("Red channel", image_r[150:,:]) + cv.imshow("Red channel detected", image_r_copy[150:,:]) + + cv.waitKey(0) # display and wait if a key is pressed and then continue + cv.destroyAllWindows() +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/04_Spurerkennung/dev/Lanedetection_development_V02.py b/04_Spurerkennung/dev/Lanedetection_development_V02.py new file mode 100644 index 0000000..9487e9b --- /dev/null +++ b/04_Spurerkennung/dev/Lanedetection_development_V02.py @@ -0,0 +1,445 @@ +# Creation Date: 01.03.2022 +# Author: Kenan Gömek +# This program is for the development of the lane detection algorithm on a windows machine +# It uses images, which were captured previously +# V02 detects the lane by suing the grayscale-image of the rgb-image + +import cv2 as cv +import numpy as np +import os +import time + +import platform + +# input +if platform.system() == "Windows": + folder_path=r"U:\bwsyncshare\SA\Software\Code\02_SA\04_Spurerkennung\images_input" +if platform.system() == "Linux": + folder_path=r"/home/pi/Desktop/Studienarbeit/04_Spurerkennung/images_input" + +# Parameters +pixels_per_mm = 71/24.25 #[px/mm] for 120 mm camera height for resolution: 416x320 +# pixels_per_mm = 107/24.25 #[px/mm] for 120 mm camera height for resolution: 640x480 + +# Offset Camera Sensor in Scooty according to Scooty-KS +x_offset_camera_mm = 100 # [mm] +y_offset_camera_mm = 50 # [mm] +x_offset_camera_px = x_offset_camera_mm*pixels_per_mm # [px] +y_offset_camera_px = y_offset_camera_mm*pixels_per_mm # [px] + +# image parameters: width > height +image_height = 320 # shape [0] +image_width = 416 # shape[1] +# calculate center of image +[x_0, y_0] = np.array([image_width/2, image_height/2], dtype=np.uint16) + + +threshold_color_detection = 60 # values under this will not be considered as active leds in each color channel + +# Parameters for Blob/LED Detection +minDiameter_mm = 5 # [mm] minimum diameter of detected blob/LED +maxDiameter_mm = 9 # [mm] maximum diameter of detected blob/LED + +# Define color numbers to identify the color channels in the matrix with all detected LEDs. No string, because numpy array should stay uint16 +color_number_off = 0 +color_number_red = 1 +color_number_green = 2 +color_number_blue = 3 +color_number_yellow = 4 +color_number_magenta = 5 +color_number_cyan = 6 +color_number_white = 7 + +show_opencv_window = True # show opencv window +draw_opencv = True # draw lane and so on + +print_additional_info = True + +def points_trafo(detected_LEDs, alpha_rad, dx, dy): + """Tranfsform points of LED to lane in KS-LED""" + detected_LEDs_trafo = detected_LEDs.copy() # copy, becuase else it is only a pointer + detected_LEDs_trafo = detected_LEDs_trafo.astype(np.int16) # avoid integer overflow + x_pnts = detected_LEDs_trafo[:,0] + y_pnts = detected_LEDs_trafo[:,1] + + # Translation + x1 = x_pnts-dx-x_0 + x_trafo=x1 + y1 = y_pnts-dy-y_0 + y_trafo = y1 + + # Rotation. 
sensor angle runs clockwise (im UZS), i.e. negative relative to the mathematical definition
+    x_trafo = np.cos(-alpha_rad)*x1-np.sin(-alpha_rad)*y1
+    detected_LEDs_trafo[:,0] = x_trafo
+    y_trafo = np.sin(-alpha_rad)*x1+np.cos(-alpha_rad)*y1
+    detected_LEDs_trafo[:,1] = y_trafo
+
+    # sort points along lane: x_2, y_2 axis (KS_LED)
+    detected_LEDs_trafo = detected_LEDs_trafo[detected_LEDs_trafo[:, 0].argsort(kind='quicksort')]
+    return detected_LEDs_trafo
+
+def construct_lane(detected_LEDs, img_bgr):
+    """Construct the lane."""
+    # Parts of this function were commented in German, because the derivation follows
+    # a German textbook; the comments are translated below.
+
+    # get points
+    # xy_pnts = detected_LEDs[:,0:2]
+    # x_pnts = detected_LEDs[:,0]
+    # y_pnts = detected_LEDs[:,1]
+
+    # approach 2:
+    # fit line through centers of LEDs in KS_0
+    # DIST_L2: the simplest and the fastest least-squares method: the simple euclidean distance
+    param = 0  # not used for DIST_L2
+    reps = 0.001  # sufficient accuracy for the radius (distance between the coordinate origin and the line)
+    aeps = 0.001  # sufficient accuracy for the angle
+    [dx, dy, x_2, y_2] = cv.fitLine(detected_LEDs[:,0:2], cv.DIST_L2, param, reps, aeps)
+    # x_2, y_2: same as: mean_of_leds = np.mean([x_pnts, y_pnts], 1)
+
+    alpha_rad = np.arctan2(dy, dx)  # calculate angle of line
+    alpha = np.arctan2(dy, dx)*180/np.pi  # calculate angle of line in degrees
+    # print(f"Lane: dx: {dx}, dy:{dy}, x2:{x_2}, y2:{y_2}, alpha:{alpha}°")
+    if print_additional_info:
+        print(f"Lane: alpha:{alpha[0]}°")
+
+    # get the smallest distance from a point to the line
+    # calculation following: Repetitorium Höhere Mathematik, Wirth
+    # line:  x = a + t*b
+    # point: OP = p
+    # d = abs(b x (p-a))/abs(b)
+    # info: np.array()[:,0] --> reduces to a 1-dimensional array with the desired values
+    p = np.array([x_0, y_0])
+    a = np.array([x_2, y_2])[:,0]
+    b = np.array([np.cos(alpha_rad), np.sin(alpha_rad)])[:,0]  # direction vector
+    c = p-a
+
+    # vector magnitude: np.linalg.norm(vec)
+    cross = np.cross(b, c)
+    d = np.linalg.norm(cross)/np.linalg.norm(b)  # distance [px]
+    # print(f"d: {round(d,2)}")
+
+    # foot of the perpendicular (Fußpunkt) (x_LED, y_LED)
+    t_0_dot = np.dot(c, b)
+    t_0_norm = (np.linalg.norm(b)**2)
+    t_0 = t_0_dot/t_0_norm
+    [x_LED, y_LED] = (a+t_0*b)
+    if print_additional_info:
+        print(f"x_LED: {x_LED}, y_LED: {y_LED}")
+
+    # distance (dx, dy) from the foot point to KS_0
+    dx_LED = x_LED - x_0
+    dx_LED_mm = dx_LED*(1/pixels_per_mm)
+    dy_LED = y_LED - y_0
+    dy_LED_mm = dy_LED*(1/pixels_per_mm)
+    if print_additional_info:
+        print(f"dx_LED:{dx_LED} [px] , dy_LED:{dy_LED} [px]")
+        print(f"dx_LED:{dx_LED_mm} [mm] , dy_LED:{dy_LED_mm} [mm]")
+
+    # distance (dx, dy) of the foot point from the image centre, expressed in KS_Scooty
+    # these values are returned
+    dx_LED_scooty = x_LED - x_0 + x_offset_camera_px
+    dx_LED_scooty_mm = dx_LED_scooty*(1/pixels_per_mm)
+    dy_LED_scooty = y_LED - y_0 + y_offset_camera_px
+    dy_LED_scooty_mm = dy_LED_scooty*(1/pixels_per_mm)
+    if print_additional_info:
+        print(f"dx_LED_scooty:{dx_LED_scooty} [px] , dy_LED_scooty:{dy_LED_scooty} [px]")
+        print(f"dx_LED_scooty:{dx_LED_scooty_mm} [mm] , dy_LED_scooty:{dy_LED_scooty_mm} [mm]")
+
+    # transform the points to obtain the sorted positions of the LEDs along the lane
+    # with the unsorted detection order, a vertical LED strip can e.g. produce the pattern 211323233
+    # --> therefore continue with the sorted points
+    detected_LEDs_KS_LED = points_trafo(detected_LEDs, alpha_rad, dx_LED, dy_LED)
+    if print_additional_info:
+        print(f"Detected LEDs in KS_LED:(x2, y2):\n {detected_LEDs_KS_LED}")
+
+    #-----------------------------------
+    # draw useful 
lines and points + + # draw lane line + if draw_opencv: + pt_0 = (a+b*np.array([-300, -300])).astype(np.int32) + pt_1 = (a+b*np.array([300, 300])).astype(np.int32) + #print(f"pt_0: {pt_0}, pt_1: {pt_1}") + cv.line(img_bgr, pt_0, pt_1, (255,255,255),1) # draw lane + + # draw dx dy + cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_LED)), (0,0,255), 2) # shortest distance from KS_0 to KS_LED --> Lot + # cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_0)), (0,0,255), 2) # only dx + # cv.line(img_bgr, (int(x_LED), int(y_0)), (int(x_LED), int(y_LED)), (0,0,255), 2) # only dy + + #draw additional points + cv.circle(img_bgr, (int(x_2), int(y_2)), 5,(255,128,255),-1) #pink. Center of points + #cv.putText(img_bgr, '(x2, y2)',(int(x_2)+5, int(y_2)-5), cv.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), cv.LINE_AA) + cv.circle(img_bgr, (int(x_LED), int(y_LED)), 5,(170,255,0),-1) # lime green. Fußpunkt + + if show_opencv_window: + cv.imshow("Lane", img_bgr) + + return dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED + +def convert_rgb_to_grayscale_average(image_bgr): + """This function converts the RGB image into an grayscale image. + Algorithm: Average: Y = (R+G+B)/3""" + + # convert dtype to prevent integer overflow while addition + image_bgr = image_bgr.astype(np.uint16, copy=False) + image_gray = (image_bgr[:,:,0]+image_bgr[:,:,1]+image_bgr[:,:,2])/3 # add values / do conversion + image_gray = image_gray.astype(np.uint8, copy=False) # convert back to uint8 + + return image_gray + +def create_detector(params_for_blob_detection): + detector = cv.SimpleBlobDetector_create(params_for_blob_detection) # Set up the detector with specified parameters. + return detector + +def define_parameters_for_blob_detection(): + """set parameters for simple blob detector""" + params = cv.SimpleBlobDetector_Params() + + # Threshold for Convert the source image to binary images by applying thresholding + # with several thresholds from minThreshold (inclusive) to maxThreshold (exclusive) + # with distance thresholdStep between neighboring thresholds. + # Since the Grayscale image is dark if only one color channel is active, + # the Threshold values have to be set like this. + # particularly the thresholdStep-Value has to be low + params.minThreshold=20 # reminder: this value is set for grayscale image + params.maxThreshold=255 + params.thresholdStep=1 + + params.filterByColor=False # do not filter blobs by color + + # Filter blobs by Area + params.filterByArea=True + minDiameter_px = minDiameter_mm*pixels_per_mm # [px] minimum diameter of detected blob/LED + maxDiameter_px = maxDiameter_mm*pixels_per_mm # [px] maximum diameter of detected blob/LED + minArea_px2 = np.pi/4*minDiameter_px**2 + maxArea_px2 = np.pi/4*maxDiameter_px**2 + + params.minArea = minArea_px2 # min Area of a blob in px^2 + # params.maxArea = maxArea_px2 # max Area of a blob in px^2. 
+ # reasons for not filtering maxArea: motion blur + rolling shutter --> larger Area + + # Filter by Inertia + params.filterByInertia=False + params.minInertiaRatio = 0.2 # [0-1] + + # Filter by Convexity + params.filterByConvexity=False + params.minConvexity = 0.2 # [0-1] + + # Filter by Circularity + params.filterByCircularity=False + params.minCircularity = 0.4 # [0-1] + + # params.minDistBetweenBlobs = minDist_px # this has no effect + + return params + + +def detect_LED_positions_in_grayscale(image_gray, image_bgr, detector): + + keypoints = detector.detect(image_gray) # Detect blobs --> LEDs + number_of_detected_leds = len(keypoints) + + if number_of_detected_leds != 0: + # print information of keypoints + print(f"detected LEDs: {number_of_detected_leds}") + + #Pre-allocate matrix for numpy + number_of_rows = number_of_detected_leds + number_of_columns = 3 + position_of_leds = np.zeros((number_of_rows, number_of_columns), dtype=np.uint16) + for i, k in enumerate(keypoints): + # x_pos = round(k.pt[0],0) # x position + # y_pos = round(k.pt[1],0) # y position + # print(f"x: {x_pos} y: {y_pos}") + # diameter_px = round(k.size,2) + # diameter_mm = round(diameter_px*1/pixels_per_mm,2) + # print(f"diameter [px]: {diameter_px} diameter [mm]: {diameter_mm}") # diameter + # area_px2 = round(np.pi/4*k.size**2,0) # area in px^2 + # area_mm2 = round(area_px2*(1/pixels_per_mm)**2,0) + # print(f"area [px^2]: {area_px2} area [mm^2]: {area_mm2}") + # print('') + + # calculate parameters to transfer to matrix + # x_pos = int(np.ceil(x_pos)) + # y_pos = int(np.ceil(y_pos)) + # Fill matrix + # position_of_leds[i,:] = [x_pos,y_pos, 0] + position_of_leds[i,0] = int(np.ceil(k.pt[0])) # x positon + position_of_leds[i,1] = int(np.ceil(k.pt[1])) # y position + + + if draw_opencv: + # draw the keypoints on the original image + # cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob + blobs = cv.drawKeypoints(image=image_bgr, keypoints=keypoints, color=(255, 255, 255), \ + outImage=np.array([]), flags= cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) + + if show_opencv_window: + cv.imshow("grayscale", image_gray) + cv.imshow("Detected", blobs) + return position_of_leds + + else: + print(f"No LEDs were detected") + return None + +def detect_position_of_all_LEDs_grayscale(image_gray, image_bgr, detector): + position_of_LEDs = detect_LED_positions_in_grayscale(image_gray, image_bgr, detector) + + if position_of_LEDs is not None: + return position_of_LEDs + else: + return None + +def get_color_of_leds(matrix_of_LEDs, image_bgr): + # is image_r[y_pos, x_pos] = image_bgr[y_pos,x_pos, 2] ? --> yes. No need to split the color channels. 
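+    # The classification below maps the per-channel status to a colour number:
+    #   R=1 G=0 B=0 -> 1 (red)      R=0 G=1 B=0 -> 2 (green)    R=0 G=0 B=1 -> 3 (blue)
+    #   R=1 G=1 B=0 -> 4 (yellow)   R=1 G=0 B=1 -> 5 (magenta)  R=0 G=1 B=1 -> 6 (cyan)
+    #   R=1 G=1 B=1 -> 7 (white)    R=0 G=0 B=0 -> 0 (off; cannot occur here)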
+ + offset = 2 # half of length from rectangle which is going to be used to determine the color around the middle point of the blob/led + # offset = 0 --> only the value from the middle point of the blob/led + # offset=1 --> 9 values, offset=2-->25 values + + for led in matrix_of_LEDs: + x_pos = led[0] # uint16 + y_pos = led[1] # uint16 + + # get values of color channels in region around middle point of blob/led: + # +1 at stop index, because it is not inclusive + region_around_blue_led = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, 0] # uint8 + region_around_green_led = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, 1] # uint8 + region_around_red_led = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, 2] # uint8 + + # average of the values + # convert dtype to prevent integer overflow while addition + region_around_red_led = region_around_red_led.astype(np.uint16, copy=False) + region_around_green_led = region_around_green_led.astype(np.uint16, copy=False) + region_around_blue_led = region_around_blue_led.astype(np.uint16, copy=False) + # sum all elements in matrix and divide with number of elements + number_of_elements= region_around_blue_led.size + value_of_red_led = region_around_red_led.sum()/number_of_elements # float64, if not integer result + value_of_green_led = region_around_green_led.sum()/number_of_elements # float64, if not integer result + value_of_blue_led = region_around_blue_led.sum()/number_of_elements # float64, if not integer result + + # determine which leds are active: + # if value > threshold --> led is active + status_blue_led = False; status_green_led = False; status_red_led = False + if value_of_blue_led > threshold_color_detection: + status_blue_led = True + if value_of_green_led > threshold_color_detection: + status_green_led = True + if value_of_red_led > threshold_color_detection: + status_red_led = True + + # determine color by checking the cases: + # case 1: red + if status_blue_led==False and status_green_led==False and status_red_led==True: + color = color_number_red + # case 2: green + elif status_blue_led==False and status_green_led==True and status_red_led==False: + color = color_number_green + # case 3: blue + elif status_blue_led==True and status_green_led==False and status_red_led==False: + color = color_number_blue + # case 4: yellow = red + green + elif status_blue_led==False and status_green_led==True and status_red_led==True: + color = color_number_yellow + # case 5: magenta = red + blue + elif status_blue_led==True and status_green_led==False and status_red_led==True: + color = color_number_magenta + # case 6: cyan = green + blue + elif status_blue_led==True and status_green_led==True and status_red_led==False: + color = color_number_cyan + # case 7: white = red + green + blue + elif status_blue_led==True and status_green_led==True and status_red_led==True: + color = color_number_white + # case 8: led not active + # this case can not occur, because no inactive led can be detected from the implemented blob-algorithm in detect_LED_positions_in_grayscale + else: + color = color_number_off + + # fill matrix with color + led[2] = color # uint16 + + return matrix_of_LEDs + + +def detect_LEDs_with_grayscale(image_bgr, detector): + # convert rgb to grayscale image + # start_m1 = time.perf_counter() + image_gray = convert_rgb_to_grayscale_average(image_bgr) + # end_m1 = time.perf_counter() + # time_processing = end_m1-start_m1 + # time_processing = time_processing*1000 + # time_processing = 
round(time_processing, 2) + # print(f'processing time conversion: {time_processing} ms') + + # get position of leds + position_of_LEDs = detect_position_of_all_LEDs_grayscale(image_gray=image_gray, image_bgr=image_bgr, detector=detector) + + #position_of_LEDs = None + + if position_of_LEDs is not None: + # determine color of leds and add to matrix + detected_LEDs = get_color_of_leds(position_of_LEDs, image_bgr) + return detected_LEDs + else: + return None + + +def lane_detection(image_bgr, detector): + # Detect LEDs + print(f"Detect LEDs and color:") + detected_LEDs = detect_LEDs_with_grayscale(image_bgr, detector) + + if detected_LEDs is not None: + # Contruct lane + #print(f"_____________________________________") + # print("Contruct lane") + dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED = \ + construct_lane(detected_LEDs, image_bgr) + + # print result + if print_additional_info: + print(f"Detected LEDs relative to KS_Sensor (x,y):\n{detected_LEDs}") + return detected_LEDs + else: + return None + + +def main(): + filenames_of_images = [f for f in os.listdir(folder_path) if f.endswith('.png')] + + # initialise parameters for blob detectio once befor loop for performane + params_for_blob_detection = define_parameters_for_blob_detection() + detector = create_detector(params_for_blob_detection) + + + for i, filename_of_image in enumerate(filenames_of_images): + print(f"image {i+1}/{len(filenames_of_images)}:{filename_of_image}") + full_path_of_image = os.path.join(folder_path, filename_of_image) + image_bgr = cv.imread(full_path_of_image, cv.IMREAD_COLOR) # load original image + + start_processing = time.perf_counter() + + detected_LEDs = lane_detection(image_bgr, detector) + end_processing = time.perf_counter() + time_processing = end_processing-start_processing + time_processing = time_processing*1000 + time_processing = round(time_processing, 2) + print(f'processing time: {time_processing} ms') + + #print(f"_____________________________________") + # show images: + # cv.imshow("Blue channel", image_b) + # cv.imshow("Green channel", image_g) + # cv.imshow("Red channel", image_r) + + if show_opencv_window: + pressed_key = cv.waitKey(0) & 0xff # display and wait if a key is pressed and then continue + if pressed_key == ord('q'): + exit() + cv.destroyAllWindows() + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/04_Spurerkennung/dev/SimpleBlobDetector_parameters.jpg b/04_Spurerkennung/dev/SimpleBlobDetector_parameters.jpg new file mode 100644 index 0000000..8ecc0a8 Binary files /dev/null and b/04_Spurerkennung/dev/SimpleBlobDetector_parameters.jpg differ diff --git a/04_Spurerkennung/dev/add_column_to_matrix.py b/04_Spurerkennung/dev/add_column_to_matrix.py new file mode 100644 index 0000000..ceb931b --- /dev/null +++ b/04_Spurerkennung/dev/add_column_to_matrix.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- +""" +Created on Thu Mar 3 18:23:42 2022 + +@author: Kenan +""" + +import numpy as np + +detected_LEDs_blue=np.load('detected_LEDs_blue.npy') + +newarr = np.c_[detected_LEDs_blue, np.full((1,8,1),1)] \ No newline at end of file diff --git a/04_Spurerkennung/dev/del_images.py b/04_Spurerkennung/dev/del_images.py new file mode 100644 index 0000000..ce54c8b --- /dev/null +++ b/04_Spurerkennung/dev/del_images.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +""" +Created on Tue Mar 1 19:06:33 2022 + +@author: Kenan + +Bilder lsöchen die über einer bestimmten schutterspeed sind + +""" +import sys, os +folder_path=r"U:\bwsyncshare\images_input" + +files = [f 
for f in os.listdir(folder_path) if f.endswith('.png')]
+
+for i, file in enumerate(files):
+    for ss in range(250, 2050, 50):
+        ss_str = str(ss)
+        if ss_str in file:
+            try:
+                os.remove(os.path.join(folder_path, file))
+            except OSError:
+                pass
diff --git a/04_Spurerkennung/dev/dev_Fußpunkt.py b/04_Spurerkennung/dev/dev_Fußpunkt.py
new file mode 100644
index 0000000..1419690
--- /dev/null
+++ b/04_Spurerkennung/dev/dev_Fußpunkt.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Sun Mar 27 11:30:48 2022
+
+@author: Kenan
+"""
+import numpy as np
+
+p = np.array([[2],[5],[-1]])
+# p = np.array([[208],[160]])
+# p = np.transpose(p)
+# p = np.array([2,5,-1])
+
+a = np.array([[2],[0],[1]])
+# a = np.array([[194.375],[237.375]])
+# a = np.transpose(a)
+# a = np.array([2,0,1])
+
+b = np.array([[2],[-1],[2]])
+# b = np.array([[0.980569],[-0.19617459]])
+# b = np.transpose(b)
+# b = np.array([2,-1,2])
+
+c = p-a
+
+cross = np.cross(b[:,0], c[:,0])
+res = np.linalg.norm(cross)/np.linalg.norm(b)
+
+# foot of the perpendicular (Fußpunkt) L
+t_0_dot = np.dot(c[:,0], b[:,0])
+t_0_norm = (np.linalg.norm(b[:,0])**2)
+t_0 = t_0_dot/t_0_norm
+
+x_0 = (a+t_0*b)[:,0]
diff --git a/04_Spurerkennung/dev/dev_Linefitting_and_angle.py b/04_Spurerkennung/dev/dev_Linefitting_and_angle.py
new file mode 100644
index 0000000..2d1777f
--- /dev/null
+++ b/04_Spurerkennung/dev/dev_Linefitting_and_angle.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Mar 4 19:02:17 2022
+
+@author: Kenan
+
+Script to determine the line equation and the angle of the line from a set of points.
+"""
+
+import numpy as np
+from matplotlib import pyplot as plt
+
+# coordinate system (KOSY): Y: up, X: right
+
+# create array, columns: x y radius colornumber
+points = np.array([[2,3,5,1],
+                   [7,6,5,1]])
+
+points = np.array([[1,1,5,1],
+                   [1,2,5,1],
+                   [2,2,5,1],
+                   [3,1,5,1],
+                   [4,1,5,1],
+                   [5,1,5,1],
+                   [6,1,5,1],
+                   [5,2,5,1],
+                   [4,3,5,1],
+                   [5,5,5,1],
+                   [7,5,5,1],
+                   [2,6,5,1]])
+
+x = points[:,0]  # x coordinates
+y = points[:,1]  # y coordinates
+
+deg = 1  # degree of the fitting polynomial
+
+[m, c] = np.polyfit(x, y, deg)  # m*x+c = g(x)
+
+# angle calculation with atan2. Mathematical convention: relative to the x-axis, positive counter-clockwise.
+x_1 = 1
+y_1 = m*x_1
+alpha_atan2 = np.arctan2(y_1, x_1)*180/np.pi
+print(f"alpha atan2: {round(alpha_atan2,2)}°")
+
+# manual angle calculation, but relative to the y-axis
+alpha = np.arctan(1/m)  # angle to the y-axis in [rad]
+alpha_deg = alpha * 180/np.pi  # angle in [°]
+print(f"alpha: {round(alpha_deg,2)}°")
+
+f1 = plt.plot(x, y, 'o')
+plt.grid(True)
+
+x_values_to_plot = np.linspace(0, 7, 100)
+y_fitted_line = m*x_values_to_plot+c
+
+plt.plot(x_values_to_plot, y_fitted_line)
\ No newline at end of file
diff --git a/04_Spurerkennung/dev/dev_Measurement_Conversion.py b/04_Spurerkennung/dev/dev_Measurement_Conversion.py
new file mode 100644
index 0000000..dcd9318
--- /dev/null
+++ b/04_Spurerkennung/dev/dev_Measurement_Conversion.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Apr 1 18:14:57 2022
+
+@author: Kenan
+Measure how long the grayscale conversion takes in numpy.
+"""
+import numpy as np
+import time
+
+image_width = 192
+image_heigth = 160
+number_of_colorchannels = 3
+
+def convert_rgb_to_grayscale_average(image_bgr):
+    """This function converts the RGB image into a grayscale image. 
+ Algorithm: Average: Y = (R+G+B)/3""" + + # convert dtype to prevent integer overflow while addition + image_bgr = image_bgr.astype(np.uint16, copy=False) + + image_gray = (image_bgr[:,:,0]+image_bgr[:,:,1]+image_bgr[:,:,2])/3 # add values / do conversion + image_gray = image_gray.astype(np.uint8, copy=False) # convert back to uint8 + + return image_gray + +int_black = 120 # integer for black color/ no color +image_bgr= np.full(\ + (image_heigth,image_width, number_of_colorchannels), \ + int_black, dtype=np.uint8) + +start_m1 = time.perf_counter() +image_gray = convert_rgb_to_grayscale_average(image_bgr) +end_m1 = time.perf_counter() +time_processing = end_m1-start_m1 +time_processing = time_processing*1000 +time_processing = round(time_processing, 2) +print(f'processing time conversion: {time_processing} ms') \ No newline at end of file diff --git a/04_Spurerkennung/dev/dev_Simple_blob_detector.py b/04_Spurerkennung/dev/dev_Simple_blob_detector.py new file mode 100644 index 0000000..42d91ed --- /dev/null +++ b/04_Spurerkennung/dev/dev_Simple_blob_detector.py @@ -0,0 +1,66 @@ +# -*- coding: utf-8 -*- +""" +Created on Tue Mar 15 10:36:30 2022 + +@author: Kenan +""" + +import cv2 as cv +import numpy as np +import math as M +import os + +folder_path=r"U:\bwsyncshare\images_input" +filenames_of_images = [f for f in os.listdir(folder_path) if f.endswith('.png')] + +for i, filename_of_image in enumerate(filenames_of_images): + full_path_of_image = os.path.join(folder_path, filename_of_image) + + image_bgr = cv.imread(full_path_of_image, cv.IMREAD_COLOR) # load original image + image_grayscale = cv.cvtColor(image_bgr, cv.COLOR_BGR2GRAY) + image = image_bgr + image_b,image_g,image_r = cv.split(image_bgr) # Split colour channels and get grayscale images + + + # For better accuracy, binary images are used before finding contours --> apply treshold + image_gray_binary = image.copy() + perc = np.percentile(image_r, 95) # calculate 95 % percentile + ret, tresh=cv.threshold(image_r, perc, 255, cv.THRESH_BINARY) # set pixels above specified percentile to 255, the resto to 0 + + cv.imshow("grayscale", image_grayscale) + cv.imshow("red", image_r) + + params = cv.SimpleBlobDetector_Params() + + params.minThreshold=1 + params.maxThreshold=255 + params.thresholdStep=1 + + params.filterByColor=False + params.filterByArea=True + params.minArea = 300 + params.filterByInertia=False + params.filterByConvexity=False + params.filterByCircularity=False + + + + detector = cv.SimpleBlobDetector_create(params) + keypoints = detector.detect(image_grayscale) + number_of_detected_leds = len(keypoints) + + + for k in keypoints: + print(k.pt[0]) # x + print(k.pt[1]) #y + print(k.size) #diameter + area = M.pi/4*k.size**2 + print(area) + print('') + + blank = np.zeros((1, 1)) + blobs = cv.drawKeypoints(image, keypoints, blank, (0, 0, 255),cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) + + cv.imshow("Blobs Using Area", blobs) + cv.waitKey(0) + cv.destroyAllWindows() \ No newline at end of file diff --git a/04_Spurerkennung/dev/dev_Trafo.py b/04_Spurerkennung/dev/dev_Trafo.py new file mode 100644 index 0000000..7707837 --- /dev/null +++ b/04_Spurerkennung/dev/dev_Trafo.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +""" +Created on Thu Mar 31 19:36:39 2022 + +@author: Kenan +""" + +import numpy as np +from matplotlib import pyplot as plt + +points = np.array([[-2, -3, -1, 0], + [0, 1, -1, -2]]) + +x = points[0,:] # x coordinates +y = points[1,:] # y coordinates +f1 = plt.plot(x,y, 'ko') +plt.grid(True) + +alpha = -45*np.pi/180 + 
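+# The rotation applied below uses the standard 2D rotation matrix
+# (mathematically positive = counter-clockwise):
+#   [x']   [cos(alpha)  -sin(alpha)]   [x]
+#   [y'] = [sin(alpha)   cos(alpha)] * [y]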
+dx=-1 +dy=-1 +b = np.array([dx,dy]) + +#Verschiebung +x1 = x-dx +y1 = y-dy +plt.plot(x1,y1, 'bo') +plt.grid(True) +plt.axis('equal') + +#Drehung +x_trafo = np.cos(alpha)*x1-np.sin(alpha)*y1 +y_trafo = np.sin(alpha)*x1+np.cos(alpha)*y1 + +plt.plot(x_trafo,y_trafo, 'o') +plt.grid(True) \ No newline at end of file diff --git a/04_Spurerkennung/dev/dev_calc_mean_of_points.py b/04_Spurerkennung/dev/dev_calc_mean_of_points.py new file mode 100644 index 0000000..de37757 --- /dev/null +++ b/04_Spurerkennung/dev/dev_calc_mean_of_points.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +""" +Created on Sun Mar 27 09:28:09 2022 + +@author: Kenan +""" +import numpy as np + +from matplotlib import pyplot as plt + + +#p1 = (1,2) +#p2=(3,4) + +# dritter wert ist farbe von led + +points = np.array([[1,3,5], + [2,4,3]]) + +mean = np.mean(points[:,0:2],1) diff --git a/04_Spurerkennung/dev/dev_color_pattern_detection.py b/04_Spurerkennung/dev/dev_color_pattern_detection.py new file mode 100644 index 0000000..01aee6d --- /dev/null +++ b/04_Spurerkennung/dev/dev_color_pattern_detection.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +""" +Created 05.03.2022 + +@author: Kenan +""" + +import numpy as np + +from itertools import permutations + + +def get_color_pattern(color_array): + # define color patterns + color_pattern_01 = np.array([1, 2, 3]) + color_pattern_50 = np.array([4, 5, 6]) + color_pattern_70 = np.array([7, 8, 9]) + + list_color_patterns = np.vstack((color_pattern_01, color_pattern_50, color_pattern_70)) + # check which cp is detected + for cp in list_color_patterns: + # create permutations + perms = np.array(list(permutations(cp, len(cp)))) # 6 permutations for 3 numbers: nPr(3,3) + #print("perm: ", perms) + # check if one of the permutations is included + for permutation in perms: + if np.array_equal(permutation, color_array): + print(cp) + break # end loop if found, for performance + + +# dritter wert ist farbe von led + +points = np.array([[1,3,7], + [2,4,8], + [2,4,9]]) + +color_array = points[:,2] + +get_color_pattern(color_array) + diff --git a/04_Spurerkennung/dev/dev_grayscale_conversion.py b/04_Spurerkennung/dev/dev_grayscale_conversion.py new file mode 100644 index 0000000..0743c2c --- /dev/null +++ b/04_Spurerkennung/dev/dev_grayscale_conversion.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +""" +Created on Sat Mar 26 17:08:00 2022 + +@author: Kenan +""" + +import cv2 as cv +import numpy as np + +from matplotlib import pyplot as plt + +path = r"U:\images_input\image.png" + +def convert_rgb_to_grayscale_average(image_b, image_g, image_r): + """This function converts the RGB image into an grayscale image. + Algorithm: Average: Y = (R+G+B)/3""" + + image_r = image_r.astype(np.uint16) #sporadische lösung. umwandlung vor addition, damit keine gehler weil uint8 --> overflow + image_g = image_g.astype(np.uint16) + image_b = image_b.astype(np.uint16) + image_gray = (image_b+image_g+image_r)/3 + image_gray = image_gray.astype(np.uint8) + + return image_gray + +def convert_rgb_to_grayscale_average_v2(image_b, image_g, image_r): + """This function converts the RGB image into an grayscale image. 
+ Algorithm: Average: Y = (R+G+B)/3""" + + image_gray = 1/3*image_r + 1/3*image_g + 1/3*image_b # automatically converts into float64 + image_gray = image_gray.astype(np.uint8) + + return image_gray + + + +image_bgr = cv.imread(path, cv.IMREAD_COLOR) # load original image +image_b,image_g,image_r = cv.split(image_bgr) # Split colour channels and get grayscale images + + +image_grayscale_avg = convert_rgb_to_grayscale_average(image_b, image_g, image_r) +plt.imshow(image_grayscale_avg, cmap = 'gray') + +image_grayscale_avg_v2 = convert_rgb_to_grayscale_average_v2(image_b, image_g, image_r) # new alrorithm, cleaner + +# Test if both are same +res = np.all(image_grayscale_avg_v2==image_grayscale_avg) +print(res) # False + +# Explanation: +# row = 227, col = 7 : r=2, g=10, b=0 +# opt1: (r+g+b)/3 = 12/3 = 4 -->int = 4 +# v2: 1/3*2 +1/3*10 + 1/3*0 = 0.6666+3.3333 = 3.9999 -->int = 3 --> FALSE +# conclusion: opt 1 is better \ No newline at end of file diff --git a/04_Spurerkennung/dev/dev_indexierung_Bildmatrix.py b/04_Spurerkennung/dev/dev_indexierung_Bildmatrix.py new file mode 100644 index 0000000..1d7254c --- /dev/null +++ b/04_Spurerkennung/dev/dev_indexierung_Bildmatrix.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- +""" +Created on Mon Mar 21 17:49:58 2022 + +@author: Kenan +""" +import cv2 as cv +import numpy as np + +from matplotlib import pyplot as plt + +path = r"U:\bwsyncshare\image.png" +image_bgr = cv.imread(path, cv.IMREAD_COLOR) # load original image +image_b,image_g,image_r = cv.split(image_bgr) # Split colour channels and get grayscale images + +image_r = image_r.astype(np.uint16) +image_g = image_g.astype(np.uint16) +image_b = image_b.astype(np.uint16) + +img_rgb=image_bgr[:,:,::-1] + +image_grayscale = cv.cvtColor(image_bgr, cv.COLOR_BGR2GRAY) # Y = 0.299 R + 0.587 G + 0.114 B +image_grayscale_KG = (image_r+image_g+image_b)/3 +image_grayscale_KG = image_grayscale_KG.astype(int) # quick and dirty way to convert to uint8 array + +image_column = 6 +image_row = 243 +image_bgr_b = image_bgr[image_column, image_row, 0] +image_b_b = image_b[image_column, image_row] + +plt.imshow(img_rgb) + +# get pixel info from specific color channel +print(image_b.shape[0]) # 416 columns +print(image_b.shape[1]) # 320 rows +#image_b[row, column] +print(image_b[230, 20]) + +#image_bgr[row, column, dim] +print(image_bgr[230,20,0]) + +# Test for addition of colors +row = 245 +column = 70 +print('Color addition:') +blue = image_bgr[row,column, 0] +green = image_bgr[row,column, 1] +red = image_bgr[row,column, 2] + +print(f'blue: {blue}') +print(f'green: {green}') +print(f'red: {red}') + +grayscale = image_grayscale[row, column] +grayscale_AVG = image_grayscale_KG[row, column] +print(f'gray Y: {grayscale}') +print(f'gray AVG: {grayscale_AVG}') +image_r = image_r.astype(np.uint16) +image_g = image_g.astype(np.uint16) +image_b = image_b.astype(np.uint16) + +res = image_r[row, column]+image_g[row, column]+image_b[row, column] + +print(f"addition of r g b: {res}") \ No newline at end of file diff --git a/04_Spurerkennung/images_input/-30_0.png b/04_Spurerkennung/images_input/-30_0.png new file mode 100644 index 0000000..bdbf466 Binary files /dev/null and b/04_Spurerkennung/images_input/-30_0.png differ diff --git a/04_Spurerkennung/images_input/01_Readme.txt b/04_Spurerkennung/images_input/01_Readme.txt new file mode 100644 index 0000000..e273a8e --- /dev/null +++ b/04_Spurerkennung/images_input/01_Readme.txt @@ -0,0 +1,2 @@ +Das sind TEstbilder, welche erstellt wurden +Winkel nur geschätzt und ohne KOSY, da 
für erstellung der bilder nicht relevant \ No newline at end of file diff --git a/04_Spurerkennung/images_input/0_0.png b/04_Spurerkennung/images_input/0_0.png new file mode 100644 index 0000000..2e40c9d Binary files /dev/null and b/04_Spurerkennung/images_input/0_0.png differ diff --git a/04_Spurerkennung/images_input/0_O.png b/04_Spurerkennung/images_input/0_O.png new file mode 100644 index 0000000..9b3ae0e Binary files /dev/null and b/04_Spurerkennung/images_input/0_O.png differ diff --git a/04_Spurerkennung/images_input/0_U.png b/04_Spurerkennung/images_input/0_U.png new file mode 100644 index 0000000..629b2f3 Binary files /dev/null and b/04_Spurerkennung/images_input/0_U.png differ diff --git a/04_Spurerkennung/images_input/135_0.png b/04_Spurerkennung/images_input/135_0.png new file mode 100644 index 0000000..7491fb3 Binary files /dev/null and b/04_Spurerkennung/images_input/135_0.png differ diff --git a/04_Spurerkennung/images_input/135_L.png b/04_Spurerkennung/images_input/135_L.png new file mode 100644 index 0000000..9c59bff Binary files /dev/null and b/04_Spurerkennung/images_input/135_L.png differ diff --git a/04_Spurerkennung/images_input/135_R.png b/04_Spurerkennung/images_input/135_R.png new file mode 100644 index 0000000..4a61178 Binary files /dev/null and b/04_Spurerkennung/images_input/135_R.png differ diff --git a/04_Spurerkennung/images_input/30_0.png b/04_Spurerkennung/images_input/30_0.png new file mode 100644 index 0000000..59bd858 Binary files /dev/null and b/04_Spurerkennung/images_input/30_0.png differ diff --git a/04_Spurerkennung/images_input/90_0.png b/04_Spurerkennung/images_input/90_0.png new file mode 100644 index 0000000..0f212ba Binary files /dev/null and b/04_Spurerkennung/images_input/90_0.png differ diff --git a/04_Spurerkennung/images_input/90_L.png b/04_Spurerkennung/images_input/90_L.png new file mode 100644 index 0000000..d598807 Binary files /dev/null and b/04_Spurerkennung/images_input/90_L.png differ diff --git a/04_Spurerkennung/images_input/90_R.png b/04_Spurerkennung/images_input/90_R.png new file mode 100644 index 0000000..3526f3e Binary files /dev/null and b/04_Spurerkennung/images_input/90_R.png differ diff --git a/04_Spurerkennung/images_input/black.png b/04_Spurerkennung/images_input/black.png new file mode 100644 index 0000000..4be5a5f Binary files /dev/null and b/04_Spurerkennung/images_input/black.png differ diff --git a/90_ZielSW/01_OnlyCapturing_NoProcessing.py b/90_ZielSW/01_OnlyCapturing_NoProcessing.py new file mode 100644 index 0000000..ed08f1d --- /dev/null +++ b/90_ZielSW/01_OnlyCapturing_NoProcessing.py @@ -0,0 +1,156 @@ +# Creation Date: 02.03.2022 +# Author: Kenan Gömek +# This script takes pictures with Picameras VideoPort like it will be used to work with OpenCV and saves it with OpenCV to have the real use case pictures. +# This script is designed for shooting manually images with 'i' +# Press 'q' to exit +# Change camera parameters with v,b,n,m,x,c,o. See code for more information. 
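+# Note on OUTPUT_RESOLUTION below: the GPU-resized frame must have width and height
+# divisible by 16. An illustrative helper (hypothetical, not used in this script):
+#   def round_up_to_16(v):
+#       return (v + 15) // 16 * 16
+#   round_up_to_16(410), round_up_to_16(308)  # -> (416, 320)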
+
+
+import cv2 as cv
+import picamera
+from picamera.array import PiRGBArray
+from fractions import Fraction
+
+import time
+from datetime import datetime
+import os
+
+import numpy as np
+import matplotlib.pyplot as plt
+
+
+# Define camera settings
+
+
+SENSOR_MODE = 4 # corresponding sensor mode to resolution 1640x1232
+OUTPUT_RESOLUTION = (416, 320) # (1640x1232)/4=(410,308) --> needs to be divisible by 16 --> 416x320
+
+
+AWB_MODE = 'off' # Auto white balance mode
+AWB_GAINS = (1.395, 1.15) # White balance gains to have colours read correctly: (red, blue). Int, float or Fraction are valid.
+BRIGHTNESS = 25 # sets the brightness setting of the camera. default is 50. [0-100]
+                # the higher the brightness, the brighter the LEDs and the higher the RGB values, and vice versa!
+CONTRAST = 100 # sets the contrast setting of the camera. The default value is 0. [-100 ... 100]
+
+SHUTTER_SPEED = 30000 # [µs]
+
+ISO = 320 # ISO value
+EXPOSURE_MODE = 'off'
+FRAMERATE = 25 # frames per second. 40 fps is max for sensor mode 4
+
+SLEEP_TIME = 2 # Time for sleep-mode for the camera in seconds. My default: 2 s
+
+# Define Functions
+def get_frames_from_camera():
+    # Initialise Camera
+    print('Initialise Camera...')
+    with picamera.PiCamera() as camera:
+        with PiRGBArray(camera) as output:
+            # Set camera settings
+            camera.sensor_mode = SENSOR_MODE # force camera into desired sensor mode
+            camera.resolution = OUTPUT_RESOLUTION # frame will be resized from GPU to this resolution. No CPU usage!
+            camera.framerate = FRAMERATE
+
+            camera.awb_mode = AWB_MODE
+            camera.awb_gains = AWB_GAINS
+
+            camera.iso = ISO
+            camera.shutter_speed = SHUTTER_SPEED
+            # it was found that you have to set the right shutter speed at the first initialisation of the current runtime of the program.
+            # The gains (analog, digital) will adjust to this setup.
+            # After the gains are fixed, they will never change, even if you change the shutter speed during the runtime!
+            # To get consistent brightness values, set the right shutter speed at initialisation once.
+
+            time.sleep(SLEEP_TIME) # wait for iso gains and digital_gain and analog_gain to settle before fixing the gains with exposure_mode = off
+            camera.exposure_mode = EXPOSURE_MODE
+
+            time.sleep(1) # wait before applying brightness and contrast
+            camera.brightness = BRIGHTNESS
+            camera.contrast = CONTRAST
+            time.sleep(SLEEP_TIME) # Camera warm-up time to apply settings
+
+            # camera.start_preview() # show camera preview through PiCamera interface
+            # camera.annotate_frame_num=True # Controls whether the current frame number is drawn as an annotation.
+
+            shutter_speed = SHUTTER_SPEED # local copy for the key handlers below (fix: was referenced before assignment)
+
+            print('Start capture...')
+
+            for frameidx, frame in enumerate(camera.capture_continuous(output, format='bgr', use_video_port=True)):
+                start_processing = time.perf_counter()
+
+                framenumber = frameidx+1 # frameidx starts with 0, framenumber with 1
+                image = frame.array # raw NumPy array without JPEG encoding
+
+                #cv.imshow("Current Frame", image) # display the image without text
+                output.truncate(0) # clear the stream for next frame
+
+                # processing
+
+
+
+                # Only uncomment the following code if you display the image. No errors if not commented, but higher fps if commented.
+                # if q is pressed, break from loop.
+                pressed_key = cv.waitKey(2) & 0xff
+                if pressed_key == ord('q'):
+                    break
+
+                elif pressed_key == ord('v'): # increase shutter speed by 5
+                    shutter_speed = round(shutter_speed+5)
+                    camera.shutter_speed = shutter_speed
+                    time.sleep(2) # wait until the shutter speed is applied before querying the exposure speed
+                    exposure_speed = camera.exposure_speed
+                    print(f"shutter speed set to: {shutter_speed}")
+                    print(f"retrieved shutter speed: {exposure_speed}")
+
+                elif pressed_key == ord('b'): # increase shutter speed by 50
+                    shutter_speed = round(shutter_speed+50)
+                    camera.shutter_speed = shutter_speed
+                    print(f"shutter speed set to: {shutter_speed}")
+                elif pressed_key == ord('n'): # increase shutter speed by 500
+                    shutter_speed = round(shutter_speed+500)
+                    camera.shutter_speed = shutter_speed # fix: this assignment was missing, so the new value was never applied
+                    print(f"shutter speed set to: {shutter_speed}")
+                elif pressed_key == ord('m'): # maximum shutter speed for the configured framerate
+                    shutter_speed = round(1/FRAMERATE*1e6)
+                    camera.shutter_speed = shutter_speed
+                    print(f"shutter speed set to: {shutter_speed}")
+                elif pressed_key == ord('x'): # decrease shutter speed by 500
+                    shutter_speed = round(shutter_speed-500)
+                    camera.shutter_speed = shutter_speed
+                    print(f"shutter speed set to: {shutter_speed}")
+                elif pressed_key == ord('c'): # decrease shutter speed by 50
+                    shutter_speed = round(shutter_speed-50)
+                    camera.shutter_speed = shutter_speed
+                    print(f"shutter speed set to: {shutter_speed}")
+                elif pressed_key == ord('o'): # set shutter speed to 0 (= automatic exposure speed in picamera)
+                    shutter_speed = 0
+                    camera.shutter_speed = shutter_speed
+                    print(f"shutter speed set to: {shutter_speed}")
+
+                end_processing = time.perf_counter()
+                time_processing = (end_processing-start_processing)*1000 # convert to ms before rounding (fix: rounding first lost precision)
+                time_processing = round(time_processing, 2)
+                print(f'processing time: {time_processing} ms')
+
+
+
+# ----------------------------------------------------------------------------
+# main
+def main():
+    # start capturing
+    get_frames_from_camera() # start capture
+
+    cv.destroyAllWindows()
+    print('Program finished')
+
+
+
+if __name__ == "__main__":
+    main()
+
+
+
+
+
+
+
+
+
diff --git a/90_ZielSW/02_With_Processing_Hough.py b/90_ZielSW/02_With_Processing_Hough.py
new file mode 100644
index 0000000..fdf5bf4
--- /dev/null
+++ b/90_ZielSW/02_With_Processing_Hough.py
@@ -0,0 +1,514 @@
+# Creation Date: 02.03.2022
+# Author: Kenan Gömek
+# This code detects the lane with HoughCircles.
+# Quit program with 'q' if an OpenCV window is shown. Otherwise use Ctrl+C.
+
+
+import cv2 as cv
+import picamera
+from picamera.array import PiRGBArray
+from fractions import Fraction
+
+import time
+from datetime import datetime
+import os
+
+import numpy as np
+import math as M
+
+# Define camera settings
+SENSOR_MODE = 4 # corresponding sensor mode to resolution 1640x1232
+OUTPUT_RESOLUTION = (192, 144) # resolution must be divisible by 16; e.g. (1640x1232)/4=(410,308) --> 416x320; here 192x144 is used
+
+
+AWB_MODE = 'off' # Auto white balance mode
+AWB_GAINS = (1.395, 1.15) # White balance gains to have colours read correctly: (red, blue). Int, float or Fraction are valid.
+BRIGHTNESS = 25 # sets the brightness setting of the camera. default is 50. [0-100]
+                # the higher the brightness, the brighter the LEDs and the higher the RGB values, and vice versa!
+CONTRAST = 100 # sets the contrast setting of the camera. The default value is 0. [-100 ... 100]
+
+SHUTTER_SPEED = 50 # [µs]
+
+ISO = 320 # ISO value
+EXPOSURE_MODE = 'off'
+FRAMERATE = 25 # frames per second. 40 fps is max for sensor mode 4
+
+SLEEP_TIME = 2 # Time for sleep-mode for the camera in seconds.
My default: 2 s + +# Define Functions +# Parameters +pixels_per_mm = 71/24.25 #[px/mm] for 120 mm camera height for resolution: 416x320 +# pixels_per_mm = 107/24.25 #[px/mm] for 120 mm camera height for resolution: 640x480 + +# Offset Camera Sensor in Scooty according to Scooty-KS +x_offset_camera_mm = 100 # [mm] +y_offset_camera_mm = 50 # [mm] +x_offset_camera_px = x_offset_camera_mm*pixels_per_mm # [px] +y_offset_camera_px = y_offset_camera_mm*pixels_per_mm # [px] + +# image parameters +image_heigth = OUTPUT_RESOLUTION[1] # shape [0] +image_width = OUTPUT_RESOLUTION[0]# shape[1] +# calculate center of image +[x_0, y_0] = np.array([image_width/2, image_heigth/2], dtype=np.uint16) + + +threshold_color_detection = 60 # values under this will not be considered as active leds in each color channel + +# Parameters for Blob/LED Detection +minDiameter_mm = 5 # [mm] minimum diameter of detected blob/LED +maxDiameter_mm = 9 # [mm] maximum diameter of detected blob/LED + +# Define color numbers to identify the color channels in the matrix with all detected LEDs. No string, because numpy array should stay uint16 +color_number_off = 0 +color_number_red = 1 +color_number_green = 2 +color_number_blue = 3 +color_number_yellow = 4 +color_number_magenta = 5 +color_number_cyan = 6 +color_number_white = 7 + +show_opencv_window = True # show opencv window +draw_opencv = True # draw lane and so on + +print_additional_info = False + +# Parameters for HoughCircles +dp = 1 # Inverse ratio of the accumulator resolution to the image resolution. For example, if dp=1 , the accumulator has the same resolution as the input image. If dp=2 , the accumulator has half as big width and height. +minDist_mm = 1 # [mm] minimal distance between two circles +minDist_px = int(minDist_mm*pixels_per_mm) # in [px] Minimum distance in px between the centers of the detected circles. If the parameter is too small, multiple neighbor circles may be falsely detected in addition to a true one. If it is too large, some circles may be missed. +minRadius_mm = 3 # [mm] minimum radius of a circle +minRadius_px = int(minRadius_mm*pixels_per_mm) # [px] Minimum circle radius. +maxRadius_mm = 7 # [mm] maximum radius of a circle +maxRadius_px = int(maxRadius_mm*pixels_per_mm) # [px] Maximum circle radius. If <= 0, uses the maximum image dimension. If < 0, returns centers without finding the radius. + +param1 = 150 # 30 First method-specific parameter. In case of HOUGH_GRADIENT , it is the higher threshold of the two passed to the Canny edge detector (the lower one is twice smaller). + # If circles/LEDs should be detected at low shutter speeds, than lower this value + # Upper threshold for the internal Canny edge detector. + # "Gradient value between dark and white" +param2 = 5 # 12 Second method-specific parameter. In case of HOUGH_GRADIENT , it is the accumulator threshold for the circle centers at the detection stage. The smaller it is, the more false circles may be detected. Circles, corresponding to the larger accumulator values, will be returned first. + # By increasing this threshold value, we can ensure that only the best circles, corresponding to larger accumulator values, are returned. 
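Editor's note, added: a minimal, self-contained sketch (not part of the repository) of how the HoughCircles parameters above interact; the synthetic frame and the blob position are assumptions for illustration only. The values minDist=2, minRadius=8 and maxRadius=20 are the pixel values derived from the constants above. Also worth flagging: pixels_per_mm = 71/24.25 is the calibration quoted for 416x320, while this script sets OUTPUT_RESOLUTION = (192, 144); the Simpleblob variant further down uses 32/24.25 for 192x144.

import cv2 as cv
import numpy as np

frame = np.zeros((144, 192), dtype=np.uint8)   # grayscale frame in OUTPUT_RESOLUTION
cv.circle(frame, (96, 72), 12, 255, -1)        # one bright blob standing in for an LED
frame = cv.GaussianBlur(frame, (5, 5), 0)      # smooth edges for the internal Canny stage

circles = cv.HoughCircles(frame, cv.HOUGH_GRADIENT, dp=1, minDist=2,
                          param1=150, param2=5, minRadius=8, maxRadius=20)
if circles is not None:
    for x, y, r in np.round(circles[0]).astype(np.uint16):
        print(f"circle at ({x}, {y}), r = {r} px")   # expected: centre near (96, 72)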
+ + +def points_trafo(detected_LEDs, alpha_rad, dx, dy): + """Tranfsform points of LED to lane in KS-LED""" + detected_LEDs_trafo = detected_LEDs.copy() # copy, becuase else it is only a pointer + detected_LEDs_trafo = detected_LEDs_trafo.astype(np.int16) # avoid integer overflow + x_pnts = detected_LEDs_trafo[:,0] + y_pnts = detected_LEDs_trafo[:,1] + + # Translation + x1 = x_pnts-dx-x_0 + x_trafo=x1 + y1 = y_pnts-dy-y_0 + y_trafo = y1 + + # Rotation. Winkel Sensor im UZS, also negativ zu mathematischer definiton + x_trafo = np.cos(-alpha_rad)*x1-np.sin(-alpha_rad)*y1 + detected_LEDs_trafo[:,0] = x_trafo + y_trafo = np.sin(-alpha_rad)*x1+np.cos(-alpha_rad)*y1 + detected_LEDs_trafo[:,1] = y_trafo + + #sort points along lane: x_2, y_2 -axis (KS_LED) + detected_LEDs_trafo = detected_LEDs_trafo[detected_LEDs_trafo[:, 0].argsort(kind='quicksort')] + return detected_LEDs_trafo + +def construct_lane(detected_LEDs, img_bgr): + """construct the lane""" + # This function is partially commented in german, because higher math is used + # clearer what is trying to be achieved + + # get points + # xy_pnts = detected_LEDs[:,0:2] + # x_pnts = detected_LEDs[:,0] + # y_pnts = detected_LEDs[:,1] + + # approach 2: + # fit line through centers of LEDs in KS_0 + # DIST_L": the simplest and the fastest least-squares method: the simple euclidean distance + param = 0 # not used for DIST_L2 + reps = 0.001 # Sufficient accuracy for the radius (distance between the coordinate origin and the line). + aeps = 0.001 # Sufficient accuracy for the angle. + [dx, dy, x_2, y_2] = cv.fitLine(detected_LEDs[:,0:2], cv.DIST_L2, param, reps, aeps) + # x2, y2: same as: mean_of_leds = np.mean([x_pnts, y_pnts], 1) + + alpha_rad = np.arctan2(dy, dx) # calculate angle of line + alpha = np.arctan2(dy, dx)*180/np.pi # calculate angle of line + # print(f"Lane: dx: {dx}, dy:{dy}, x2:{x_2}, y2:{y_2}, alpha:{alpha}°") + if print_additional_info: + print(f"Lane: alpha:{alpha[0]}°") + + # get smallest distance to point an line + # Berechnung nach: Repetitorium Höhere Mathematik, Wirth + # Gerade: x = a+ t*b + # Punkt : OP = p + # d = abs(b x (p-a))/(abs(b)) + # info: np.array()[:,0] --> gets only array with 1 dimensions with desired values + p = np.array([x_0, y_0]) + a = np.array([x_2, y_2])[:,0] + b = np.array([np.cos(alpha_rad), np.sin(alpha_rad)])[:,0] # Richtungsvektor + c = p-a + + # Betrag von Vektor: np.linalg.norm(vec) + cross= np.cross(b, c) + d = np.linalg.norm(cross)/np.linalg.norm(b) # distance [px] + #print(f"d: {round(d,2)}") + + # Fußpunkt (X_LED, Y_LED) + t_0_dot = np.dot(c, b) + t_0_norm = (np.linalg.norm(b)**2) + t_0 = t_0_dot/t_0_norm + [x_LED, y_LED] = (a+t_0*b) + if print_additional_info: + print(f"x_LED: {x_LED}, y_LED: {y_LED}") + + # Abstand (dx, dy) Fußpunkt zu KS_0 + dx_LED = x_LED - x_0 + dx_LED_mm = dx_LED*(1/pixels_per_mm) + dy_LED = y_LED - y_0 + dy_LED_mm = dy_LED*(1/pixels_per_mm) + if print_additional_info: + print(f"dx_LED:{dx_LED} [px] , dy_LED:{dy_LED} [px]") + print(f"dx_LED:{dx_LED_mm} [mm] , dy_LED:{dy_LED_mm} [mm]") + + # Abstand (dx, dy) Fußpunkt von Bildmitte zu KS_Scooty + # Diese Werte zurückgeben + dx_LED_scooty = x_LED - x_0 + x_offset_camera_px + dx_LED_scooty_mm = dx_LED_scooty*(1/pixels_per_mm) + dy_LED_scooty = y_LED - y_0 + y_offset_camera_px + dy_LED_scooty_mm = dy_LED_scooty*(1/pixels_per_mm) + if print_additional_info: + print(f"dx_LED_scooty:{dx_LED_scooty} [px] , dy_LED_scooty:{dy_LED_scooty} [px]") + print(f"dx_LED_scooty:{dx_LED_scooty_mm} [mm] , dy_LED_scooty:{dy_LED_scooty_mm} 
[mm]") + + # Punkte Trafo, um sortierte position der LEDs entlang Spur zu erhalten + # Bei normal detected kann bei vertikaler LED zb Fehler entstehen und dann muster: 211323233 -> daher mit dieser sortierten weitermachen + detected_LEDs_KS_LED = points_trafo(detected_LEDs, alpha_rad, dx_LED, dy_LED) + if print_additional_info: + print(f"Detected LEDs in KS_LED:(x2, y2):\n {detected_LEDs_KS_LED}") + + #----------------------------------- + # draw useful lines and points + + # draw lane line + if draw_opencv: + pt_0 = (a+b*np.array([-300, -300])).astype(np.int32) + pt_1 = (a+b*np.array([300, 300])).astype(np.int32) + #print(f"pt_0: {pt_0}, pt_1: {pt_1}") + cv.line(img_bgr, pt_0, pt_1, (255,255,255),1) # draw lane + + # draw dx dy + cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_LED)), (0,0,255), 2) # shortest distance from KS_0 to KS_LED --> Lot + # cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_0)), (0,0,255), 2) # only dx + # cv.line(img_bgr, (int(x_LED), int(y_0)), (int(x_LED), int(y_LED)), (0,0,255), 2) # only dy + + #draw additional points + cv.circle(img_bgr, (int(x_2), int(y_2)), 5,(255,128,255),-1) #pink. Center of points + #cv.putText(img_bgr, '(x2, y2)',(int(x_2)+5, int(y_2)-5), cv.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), cv.LINE_AA) + cv.circle(img_bgr, (int(x_LED), int(y_LED)), 5,(170,255,0),-1) # lime green. Fußpunkt + + if show_opencv_window: + cv.imshow("Lane", img_bgr) + + return dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED + +def convert_rgb_to_grayscale_average(image_bgr): + """This function converts the RGB image into an grayscale image. + Algorithm: Average: Y = (R+G+B)/3""" + + # convert dtype to prevent integer overflow while addition + image_bgr = image_bgr.astype(np.uint16, copy=False) + image_gray = (image_bgr[:,:,0]+image_bgr[:,:,1]+image_bgr[:,:,2])/3 # add values / do conversion + image_gray = image_gray.astype(np.uint8, copy=False) # convert back to uint8 + + return image_gray + +def create_detector(params_for_blob_detection): + detector = cv.SimpleBlobDetector_create(params_for_blob_detection) # Set up the detector with specified parameters. + return detector + +def define_parameters_for_blob_detection(): + """set parameters for simple blob detector""" + params = cv.SimpleBlobDetector_Params() + + # Threshold for Convert the source image to binary images by applying thresholding + # with several thresholds from minThreshold (inclusive) to maxThreshold (exclusive) + # with distance thresholdStep between neighboring thresholds. + # Since the Grayscale image is dark if only one color channel is active, + # the Threshold values have to be set like this. 
+    # in particular, the thresholdStep value has to be low
+    params.minThreshold=20 # reminder: this value is set for the grayscale image
+    params.maxThreshold=255
+    params.thresholdStep=1
+
+    params.filterByColor=False # do not filter blobs by color
+
+    # Filter blobs by Area
+    params.filterByArea=False
+
+    # Filter by Inertia
+    params.filterByInertia=False
+
+
+    # Filter by Convexity
+    params.filterByConvexity=False
+
+
+    # Filter by Circularity
+    params.filterByCircularity=False
+
+
+    # params.minDistBetweenBlobs = minDist_px # this has no effect
+
+    return params
+
+
+def detect_LED_positions_in_grayscale(image_gray, image_bgr, detector):
+    start_processing = time.perf_counter()
+    #keypoints = detector.detect(image_gray) # Detect blobs --> LEDs
+
+    detected_LEDs = cv.HoughCircles(image_gray, cv.HOUGH_GRADIENT, dp=dp, minDist=minDist_px,
+                                    param1=param1, param2=param2, minRadius=minRadius_px, maxRadius=maxRadius_px)
+    end_processing = time.perf_counter()
+    time_processing = end_processing-start_processing
+    time_processing = time_processing*1000
+    time_processing = round(time_processing, 2)
+    print(f'processing time Hough: {time_processing} ms')
+
+    # check if at least one circle was found in the image
+    if detected_LEDs is not None:
+        detected_LEDs = np.uint16(np.round(detected_LEDs)) # convert the (x, y) coordinates and radius of the circles to integers
+        detected_LEDs = detected_LEDs[0,:]
+        detected_LEDs = np.hstack((detected_LEDs, np.full((detected_LEDs.shape[0],1), 9, dtype=np.uint16)))
+        # append a colour column (placeholder value 9 = not yet classified) --> matrix with columns: x, y, r, colour
+        number_of_detected_LEDs = detected_LEDs.shape[0]
+        print(f"detected LEDs: {number_of_detected_LEDs}")
+
+        # parameters for drawing
+        line_thickness = 1
+        circle_color = (0,255,0)
+        vertex_offset = 2
+        rectangle_color = (0,128,255) # R G B
+        for (x, y, r, cn) in detected_LEDs:
+            print(f"x:{x} px, y:{y} px, r:{r} px, r:{round(r*1/(pixels_per_mm),2)} mm, D: {round(2*r*1/(pixels_per_mm),2)} mm, color: {cn}")
+            cv.circle(image_bgr, (x, y), r, circle_color, thickness=line_thickness) # draw detected circumference of the circle
+            cv.rectangle(img=image_bgr, pt1=(x-vertex_offset, y-vertex_offset), pt2=(x+vertex_offset, y+vertex_offset), \
+                         color=rectangle_color, thickness=cv.FILLED)
+        cv.imshow("HC", image_bgr)
+
+        return None # development state: the positions are drawn but not returned, so lane construction is skipped in this Hough variant
+
+    else:
+        print("No LEDs were detected")
+        return None
+
+def detect_position_of_all_LEDs_grayscale(image_gray, image_bgr, detector):
+    position_of_LEDs = detect_LED_positions_in_grayscale(image_gray, image_bgr, detector)
+
+    if position_of_LEDs is not None:
+        return position_of_LEDs
+    else:
+        return None
+
+def get_color_of_leds(matrix_of_LEDs, image_bgr):
+    # is image_r[y_pos, x_pos] == image_bgr[y_pos, x_pos, 2]? --> yes. No need to split the color channels.
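+    # (Editor's note, added) The window slices below are not border-safe: for a blob closer
+    # than `offset` pixels to the image edge the uint16 start index underflows, which can
+    # produce an empty region and a meaningless 0/0 average. A clamp such as
+    #   y0 = max(int(y_pos)-offset, 0); x0 = max(int(x_pos)-offset, 0)
+    # (sketch, not in the original code) would guard against this. With offset = 0, as used
+    # in this variant, the issue cannot occur.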
+ + offset = 0 # half of length from rectangle which is going to be used to determine the color around the middle point of the blob/led + # offset = 0 --> only the value from the middle point of the blob/led + # offset=1 --> 9 values, offset=2-->25 values + + for led in matrix_of_LEDs: + x_pos = led[0] # uint16 + y_pos = led[1] # uint16 + + # get values of color channels in region around middle point of blob/led: + # +1 at stop index, because it is not inclusive + region_around_blue_led = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, 0] # uint8 + region_around_green_led = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, 1] # uint8 + region_around_red_led = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, 2] # uint8 + + # average of the values + # convert dtype to prevent integer overflow while addition + region_around_red_led = region_around_red_led.astype(np.uint16, copy=False) + region_around_green_led = region_around_green_led.astype(np.uint16, copy=False) + region_around_blue_led = region_around_blue_led.astype(np.uint16, copy=False) + # sum all elements in matrix and divide with number of elements + number_of_elements= region_around_blue_led.size + value_of_red_led = region_around_red_led.sum()/number_of_elements # float64, if not integer result + value_of_green_led = region_around_green_led.sum()/number_of_elements # float64, if not integer result + value_of_blue_led = region_around_blue_led.sum()/number_of_elements # float64, if not integer result + + # determine which leds are active: + # if value > threshold --> led is active + status_blue_led = False; status_green_led = False; status_red_led = False + if value_of_blue_led > threshold_color_detection: + status_blue_led = True + if value_of_green_led > threshold_color_detection: + status_green_led = True + if value_of_red_led > threshold_color_detection: + status_red_led = True + + # determine color by checking the cases: + # case 1: red + if status_blue_led==False and status_green_led==False and status_red_led==True: + color = color_number_red + # case 2: green + elif status_blue_led==False and status_green_led==True and status_red_led==False: + color = color_number_green + # case 3: blue + elif status_blue_led==True and status_green_led==False and status_red_led==False: + color = color_number_blue + # case 4: yellow = red + green + elif status_blue_led==False and status_green_led==True and status_red_led==True: + color = color_number_yellow + # case 5: magenta = red + blue + elif status_blue_led==True and status_green_led==False and status_red_led==True: + color = color_number_magenta + # case 6: cyan = green + blue + elif status_blue_led==True and status_green_led==True and status_red_led==False: + color = color_number_cyan + # case 7: white = red + green + blue + elif status_blue_led==True and status_green_led==True and status_red_led==True: + color = color_number_white + # case 8: led not active + # this case can not occur, because no inactive led can be detected from the implemented blob-algorithm in detect_LED_positions_in_grayscale + else: + color = color_number_off + + # fill matrix with color + led[2] = color # uint16 + + return matrix_of_LEDs + + +def detect_LEDs_with_grayscale(image_bgr, detector): + # convert rgb to grayscale image + # start_m1 = time.perf_counter() + image_gray = convert_rgb_to_grayscale_average(image_bgr) + # end_m1 = time.perf_counter() + # time_processing = end_m1-start_m1 + # time_processing = time_processing*1000 + # time_processing = 
round(time_processing, 2) + # print(f'processing time conversion: {time_processing} ms') + + # get position of leds + position_of_LEDs = detect_position_of_all_LEDs_grayscale(image_gray=image_gray, image_bgr=image_bgr, detector=detector) + + #position_of_LEDs = None + + if position_of_LEDs is not None: + # determine color of leds and add to matrix + detected_LEDs = get_color_of_leds(position_of_LEDs, image_bgr) + return detected_LEDs + else: + return None + + +def lane_detection(image_bgr, detector): + # Detect LEDs + print(f"Detect LEDs and color:") + detected_LEDs = detect_LEDs_with_grayscale(image_bgr, detector) + + if detected_LEDs is not None: + # Contruct lane + #print(f"_____________________________________") + # print("Contruct lane") + dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED = \ + construct_lane(detected_LEDs, image_bgr) + + # print result + if print_additional_info: + print(f"Detected LEDs relative to image-center(x0,y0):\n{detected_LEDs}") + return detected_LEDs + else: + return None + + + +# Picamera +def get_frames_from_camera(detector): + # Initialise Camera + print('Initialise Camera...') + with picamera.PiCamera() as camera: + with PiRGBArray(camera) as output: + # Set camera settings + camera.sensor_mode = SENSOR_MODE # force camera into desired sensor mode + camera.resolution = OUTPUT_RESOLUTION # frame will be resized from GPU to this resolution. No CPU usage! + camera.framerate = FRAMERATE + + camera.awb_mode = AWB_MODE + camera.awb_gains = AWB_GAINS + + camera.iso = ISO + camera.shutter_speed = SHUTTER_SPEED + # it was found that, you have to set the right shutter speed at the first initalisation of the current runtime of the program. + # The gains (analog, digital) will adjust to this set up. + # After the gains are fixed, they will never change! even if you change the shutter speed during the runtime. + # To get consistent brightness values, set the right shutter speed at initalisation once. + + time.sleep(SLEEP_TIME) # wait for iso gains and digital_gain and analog_gain to settle before fixing the gains with exposure_mode = off + camera.exposure_mode = EXPOSURE_MODE + + time.sleep(1) # wait before applying brightness and contrast + camera.brightness = BRIGHTNESS + camera.contrast = CONTRAST + time.sleep(SLEEP_TIME) # Camera warm-up time to apply settings + + # camera.start_preview() # show camera preview through PiCamera interface + # camera.annotate_frame_num=True # Controls whether the current frame number is drawn as an annotation. + + print('Start caputure...') + + for frameidx, frame in enumerate(camera.capture_continuous(output, format='bgr', use_video_port=True)): + start_processing = time.perf_counter() + + framenumber = frameidx+1 # frameidx starts with 0, framenumber with 1 + image_bgr = frame.array # raw NumPy array without JPEG encoding + + #cv.imshow("Current Frame", image) # display the image without text + output.truncate(0) # clear the stream for next frame + + # processing + lane_detection(image_bgr, detector) + + + # Only uncomment following code if you display the image. No errors if not commented, but higher fps if commented. + # if q is pressed, break from loop. 
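+                # (Editor's note) cv.waitKey only delivers key presses while a HighGUI window
+                # created by cv.imshow has focus; with show_opencv_window = False, quit with
+                # Ctrl+C instead, as stated in the file header.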
+ pressed_key = cv.waitKey(2) & 0xff + if pressed_key == ord('q'): + break + end_processing = time.perf_counter() + time_processing = round(end_processing-start_processing, 2) + time_processing = time_processing*1000 + print(f'processing time: {time_processing} ms') + + + +# ---------------------------------------------------------------------------- +# main +def main(): + + # initialise parameters for blob detectio once befor loop for performane + params_for_blob_detection = define_parameters_for_blob_detection() + detector = create_detector(params_for_blob_detection) + + # start capturing + get_frames_from_camera(detector) # start capture + + cv.destroyAllWindows() + print('Program finished') + + + +if __name__ == "__main__": + main() + + + + + + + + + diff --git a/90_ZielSW/02_With_Processing_Zielsoftware_Simpleblob.py b/90_ZielSW/02_With_Processing_Zielsoftware_Simpleblob.py new file mode 100644 index 0000000..59e03b3 --- /dev/null +++ b/90_ZielSW/02_With_Processing_Zielsoftware_Simpleblob.py @@ -0,0 +1,514 @@ +# Creation Date: 02.03.2022 +# Author: Kenan Gömek +# This code detects the lane with Simpleblob Detector. +# Quit program with 'q' if opencv windows is shows. Else use Ctrl+C. + + +import cv2 as cv +import picamera +from picamera.array import PiRGBArray +from fractions import Fraction + +import time +from datetime import datetime +import os + +import numpy as np +import math as M + +# Define camera settings +SENSOR_MODE = 4 # corresponding sensor mode to resolution 1640x1232 +# OUTPUT_RESOLUTION = (416, 320) # (1640x1232)/4=(410,308) --> needs to be divisible by 16 --> 416x320 +OUTPUT_RESOLUTION = (192, 144) + +AWB_MODE = 'off' # Auto white balance mode +AWB_GAINS = (1.395, 1.15) # White Balance Gains to have colours read correctly: (red, blue). Int, floar or fraction are valid. +BRIGHTNESS = 25 # sets the brightness setting of the camera. default is 50. [0-100] + #the brighter, the brighter the LEDs and the higher the RGB values and vice versa! +CONTRAST = 100 # sets the contrast setting of the camera. The default value is 0. [-100 ... 100] + +SHUTTER_SPEED = 50 # [µs] + +ISO = 320 # ISO value +EXPOSURE_MODE = 'off' +FRAMERATE = 25 # frames per second. 40 fps is max for sensor mode 4 + +SLEEP_TIME = 2 # Time for sleep-mode for the camera in seconds. My default: 2 s + +# Parameters +pixels_per_mm = 32/24.25 #[px/mm] for 120 mm camera height for resolution: 192x144 +# pixels_per_mm = 71/24.25 #[px/mm] for 120 mm camera height for resolution: 416x320 +# pixels_per_mm = 107/24.25 #[px/mm] for 120 mm camera height for resolution: 640x480 + +# Offset Camera Sensor in Scooty according to Scooty-KS +x_offset_camera_mm = 100 # [mm] +y_offset_camera_mm = 50 # [mm] +x_offset_camera_px = x_offset_camera_mm*pixels_per_mm # [px] +y_offset_camera_px = y_offset_camera_mm*pixels_per_mm # [px] + +# image parameters +image_heigth = OUTPUT_RESOLUTION[1] # shape [0] +image_width = OUTPUT_RESOLUTION[0]# shape[1] +# calculate center of image +[x_0, y_0] = np.array([image_width/2, image_heigth/2], dtype=np.uint16) + + +threshold_color_detection = 60 # values under this will not be considered as active leds in each color channel + +# Parameters for Blob/LED Detection +minDiameter_mm = 5 # [mm] minimum diameter of detected blob/LED +maxDiameter_mm = 9 # [mm] maximum diameter of detected blob/LED + +# Define color numbers to identify the color channels in the matrix with all detected LEDs. 
No string, because numpy array should stay uint16 +color_number_off = 0 +color_number_red = 1 +color_number_green = 2 +color_number_blue = 3 +color_number_yellow = 4 +color_number_magenta = 5 +color_number_cyan = 6 +color_number_white = 7 + +show_opencv_window = True # show opencv window +draw_opencv = True # draw lane and so on + +print_additional_info = True + +def points_trafo(detected_LEDs, alpha_rad, dx, dy): + """Tranfsform points of LED to lane in KS-LED""" + detected_LEDs_trafo = detected_LEDs.copy() # copy, becuase else it is only a pointer + detected_LEDs_trafo = detected_LEDs_trafo.astype(np.int16) # avoid integer overflow + x_pnts = detected_LEDs_trafo[:,0] + y_pnts = detected_LEDs_trafo[:,1] + + # Translation + x1 = x_pnts-dx-x_0 + x_trafo=x1 + y1 = y_pnts-dy-y_0 + y_trafo = y1 + + # Rotation. Winkel Sensor im UZS, also negativ zu mathematischer definiton + x_trafo = np.cos(-alpha_rad)*x1-np.sin(-alpha_rad)*y1 + detected_LEDs_trafo[:,0] = x_trafo + y_trafo = np.sin(-alpha_rad)*x1+np.cos(-alpha_rad)*y1 + detected_LEDs_trafo[:,1] = y_trafo + + #sort points along lane: x_2, y_2 -axis (KS_LED) + detected_LEDs_trafo = detected_LEDs_trafo[detected_LEDs_trafo[:, 0].argsort(kind='quicksort')] + return detected_LEDs_trafo + +def construct_lane(detected_LEDs, img_bgr): + """construct the lane""" + # This function is partially commented in german, because higher math is used + # clearer what is trying to be achieved + + # get points + # xy_pnts = detected_LEDs[:,0:2] + # x_pnts = detected_LEDs[:,0] + # y_pnts = detected_LEDs[:,1] + + # approach 2: + # fit line through centers of LEDs in KS_0 + # DIST_L": the simplest and the fastest least-squares method: the simple euclidean distance + param = 0 # not used for DIST_L2 + reps = 0.001 # Sufficient accuracy for the radius (distance between the coordinate origin and the line). + aeps = 0.001 # Sufficient accuracy for the angle. 
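+    # (Editor's note, added) cv.fitLine with DIST_L2 returns a normalised direction vector
+    # (dx, dy) and a point (x_2, y_2) on the fitted line. Worked numeric example of the
+    # distance formula used below, with illustrative values:
+    #   p = (96, 72), a = (90, 70), alpha = 0  ->  b = (1, 0)
+    #   c = p - a = (6, 2);  b x c = 1*2 - 0*6 = 2;  d = |2|/|b| = 2 px
+    #   t_0 = (c.b)/|b|^2 = 6  ->  foot of perpendicular (x_LED, y_LED) = a + 6*b = (96, 70)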
+ [dx, dy, x_2, y_2] = cv.fitLine(detected_LEDs[:,0:2], cv.DIST_L2, param, reps, aeps) + # x2, y2: same as: mean_of_leds = np.mean([x_pnts, y_pnts], 1) + + alpha_rad = np.arctan2(dy, dx) # calculate angle of line + alpha = np.arctan2(dy, dx)*180/np.pi # calculate angle of line + # print(f"Lane: dx: {dx}, dy:{dy}, x2:{x_2}, y2:{y_2}, alpha:{alpha}°") + if print_additional_info: + print(f"Lane: alpha:{alpha[0]}°") + + # get smallest distance to point an line + # Berechnung nach: Repetitorium Höhere Mathematik, Wirth + # Gerade: x = a+ t*b + # Punkt : OP = p + # d = abs(b x (p-a))/(abs(b)) + # info: np.array()[:,0] --> gets only array with 1 dimensions with desired values + p = np.array([x_0, y_0]) + a = np.array([x_2, y_2])[:,0] + b = np.array([np.cos(alpha_rad), np.sin(alpha_rad)])[:,0] # Richtungsvektor + c = p-a + + # Betrag von Vektor: np.linalg.norm(vec) + cross= np.cross(b, c) + d = np.linalg.norm(cross)/np.linalg.norm(b) # distance [px] + #print(f"d: {round(d,2)}") + + # Fußpunkt (X_LED, Y_LED) + t_0_dot = np.dot(c, b) + t_0_norm = (np.linalg.norm(b)**2) + t_0 = t_0_dot/t_0_norm + [x_LED, y_LED] = (a+t_0*b) + if print_additional_info: + print(f"x_LED: {x_LED}, y_LED: {y_LED}") + + # Abstand (dx, dy) Fußpunkt zu KS_0 + dx_LED = x_LED - x_0 + dx_LED_mm = dx_LED*(1/pixels_per_mm) + dy_LED = y_LED - y_0 + dy_LED_mm = dy_LED*(1/pixels_per_mm) + if print_additional_info: + print(f"dx_LED:{dx_LED} [px] , dy_LED:{dy_LED} [px]") + print(f"dx_LED:{dx_LED_mm} [mm] , dy_LED:{dy_LED_mm} [mm]") + + # Abstand (dx, dy) Fußpunkt von Bildmitte zu KS_Scooty + # Diese Werte zurückgeben + dx_LED_scooty = x_LED - x_0 + x_offset_camera_px + dx_LED_scooty_mm = dx_LED_scooty*(1/pixels_per_mm) + dy_LED_scooty = y_LED - y_0 + y_offset_camera_px + dy_LED_scooty_mm = dy_LED_scooty*(1/pixels_per_mm) + if print_additional_info: + print(f"dx_LED_scooty:{dx_LED_scooty} [px] , dy_LED_scooty:{dy_LED_scooty} [px]") + print(f"dx_LED_scooty:{dx_LED_scooty_mm} [mm] , dy_LED_scooty:{dy_LED_scooty_mm} [mm]") + + # Punkte Trafo, um sortierte position der LEDs entlang Spur zu erhalten + # Bei normal detected kann bei vertikaler LED zb Fehler entstehen und dann muster: 211323233 -> daher mit dieser sortierten weitermachen + detected_LEDs_KS_LED = points_trafo(detected_LEDs, alpha_rad, dx_LED, dy_LED) + if print_additional_info: + print(f"Detected LEDs in KS_LED:(x2, y2):\n {detected_LEDs_KS_LED}") + + #----------------------------------- + # draw useful lines and points + + # draw lane line + if draw_opencv: + pt_0 = (a+b*np.array([-300, -300])).astype(np.int32) + pt_1 = (a+b*np.array([300, 300])).astype(np.int32) + #print(f"pt_0: {pt_0}, pt_1: {pt_1}") + cv.line(img_bgr, pt_0, pt_1, (255,255,255),1) # draw lane + + # draw dx dy + cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_LED)), (0,0,255), 2) # shortest distance from KS_0 to KS_LED --> Lot + # cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_0)), (0,0,255), 2) # only dx + # cv.line(img_bgr, (int(x_LED), int(y_0)), (int(x_LED), int(y_LED)), (0,0,255), 2) # only dy + + #draw additional points + cv.circle(img_bgr, (int(x_2), int(y_2)), 5,(255,128,255),-1) #pink. Center of points + #cv.putText(img_bgr, '(x2, y2)',(int(x_2)+5, int(y_2)-5), cv.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), cv.LINE_AA) + cv.circle(img_bgr, (int(x_LED), int(y_LED)), 5,(170,255,0),-1) # lime green. 
Fußpunkt + + if show_opencv_window: + cv.imshow("Lane", img_bgr) + + return dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED + +def convert_rgb_to_grayscale_average(image_bgr): + """This function converts the RGB image into an grayscale image. + Algorithm: Average: Y = (R+G+B)/3""" + + # convert dtype to prevent integer overflow while addition + image_bgr = image_bgr.astype(np.uint16, copy=False) + image_gray = (image_bgr[:,:,0]+image_bgr[:,:,1]+image_bgr[:,:,2])/3 # add values / do conversion + image_gray = image_gray.astype(np.uint8, copy=False) # convert back to uint8 + + return image_gray + +def create_detector(params_for_blob_detection): + detector = cv.SimpleBlobDetector_create(params_for_blob_detection) # Set up the detector with specified parameters. + return detector + +def define_parameters_for_blob_detection(): + """set parameters for simple blob detector""" + params = cv.SimpleBlobDetector_Params() + + # Threshold for Convert the source image to binary images by applying thresholding + # with several thresholds from minThreshold (inclusive) to maxThreshold (exclusive) + # with distance thresholdStep between neighboring thresholds. + # Since the Grayscale image is dark if only one color channel is active, + # the Threshold values have to be set like this. + # particularly the thresholdStep-Value has to be low + params.minThreshold=20 # reminder: this value is set for grayscale image + params.maxThreshold=255 + params.thresholdStep=1 + + params.filterByColor=False # do not filter blobs by color + + # Filter blobs by Area + params.filterByArea=False + + # Filter by Inertia + params.filterByInertia=False + + + # Filter by Convexity + params.filterByConvexity=False + + + # Filter by Circularity + params.filterByCircularity=False + + + # params.minDistBetweenBlobs = minDist_px # this has no effect + + return params + + +def detect_LED_positions_in_grayscale(image_gray, image_bgr, detector): + start_processing = time.perf_counter() + keypoints = detector.detect(image_gray) # Detect blobs --> LEDs + end_processing = time.perf_counter() + time_processing = end_processing-start_processing + time_processing = time_processing*1000 + time_processing = round(time_processing, 2) + print(f'processing time detector: {time_processing} ms') + + number_of_detected_leds = len(keypoints) + + if number_of_detected_leds != 0: + # print information of keypoints + print(f"detected LEDs: {number_of_detected_leds}") + + #Pre-allocate matrix for numpy + number_of_rows = number_of_detected_leds + number_of_columns = 3 + position_of_leds = np.zeros((number_of_rows, number_of_columns), dtype=np.uint16) + for i, k in enumerate(keypoints): + # x_pos = round(k.pt[0],0) # x position + # y_pos = round(k.pt[1],0) # y position + # print(f"x: {x_pos} y: {y_pos}") + # diameter_px = round(k.size,2) + # diameter_mm = round(diameter_px*1/pixels_per_mm,2) + # print(f"diameter [px]: {diameter_px} diameter [mm]: {diameter_mm}") # diameter + # area_px2 = round(np.pi/4*k.size**2,0) # area in px^2 + # area_mm2 = round(area_px2*(1/pixels_per_mm)**2,0) + # print(f"area [px^2]: {area_px2} area [mm^2]: {area_mm2}") + # print('') + + # calculate parameters to transfer to matrix + # x_pos = int(np.ceil(x_pos)) + # y_pos = int(np.ceil(y_pos)) + # Fill matrix + # position_of_leds[i,:] = [x_pos,y_pos, 0] + position_of_leds[i,0] = int(np.ceil(k.pt[0])) # x positon + position_of_leds[i,1] = int(np.ceil(k.pt[1])) # y position + + + if draw_opencv: + # draw the keypoints on the original image + # 
cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob + blobs = cv.drawKeypoints(image=image_bgr, keypoints=keypoints, color=(255, 255, 255), \ + outImage=np.array([]), flags= cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) + + if show_opencv_window: + # cv.imshow("grayscale", image_gray) + cv.imshow("Detected", blobs) + return position_of_leds + + else: + print(f"No LEDs were detected") + return None + +def detect_position_of_all_LEDs_grayscale(image_gray, image_bgr, detector): + position_of_LEDs = detect_LED_positions_in_grayscale(image_gray, image_bgr, detector) + + if position_of_LEDs is not None: + return position_of_LEDs + else: + return None + +def get_color_of_leds(matrix_of_LEDs, image_bgr): + # is image_r[y_pos, x_pos] = image_bgr[y_pos,x_pos, 2] ? --> yes. No need to split the color channels. + + offset = 2 # half of length from rectangle which is going to be used to determine the color around the middle point of the blob/led + # offset = 0 --> only the value from the middle point of the blob/led + # offset=1 --> 9 values, offset=2-->25 values + + for led in matrix_of_LEDs: + x_pos = led[0] # uint16 + y_pos = led[1] # uint16 + + # get values of color channels in region around middle point of blob/led: + # +1 at stop index, because it is not inclusive + region_around_blue_led = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, 0] # uint8 + region_around_green_led = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, 1] # uint8 + region_around_red_led = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, 2] # uint8 + + # average of the values + # convert dtype to prevent integer overflow while addition + region_around_red_led = region_around_red_led.astype(np.uint16, copy=False) + region_around_green_led = region_around_green_led.astype(np.uint16, copy=False) + region_around_blue_led = region_around_blue_led.astype(np.uint16, copy=False) + # sum all elements in matrix and divide with number of elements + number_of_elements= region_around_blue_led.size + value_of_red_led = region_around_red_led.sum()/number_of_elements # float64, if not integer result + value_of_green_led = region_around_green_led.sum()/number_of_elements # float64, if not integer result + value_of_blue_led = region_around_blue_led.sum()/number_of_elements # float64, if not integer result + + # determine which leds are active: + # if value > threshold --> led is active + status_blue_led = False; status_green_led = False; status_red_led = False + if value_of_blue_led > threshold_color_detection: + status_blue_led = True + if value_of_green_led > threshold_color_detection: + status_green_led = True + if value_of_red_led > threshold_color_detection: + status_red_led = True + + # determine color by checking the cases: + # case 1: red + if status_blue_led==False and status_green_led==False and status_red_led==True: + color = color_number_red + # case 2: green + elif status_blue_led==False and status_green_led==True and status_red_led==False: + color = color_number_green + # case 3: blue + elif status_blue_led==True and status_green_led==False and status_red_led==False: + color = color_number_blue + # case 4: yellow = red + green + elif status_blue_led==False and status_green_led==True and status_red_led==True: + color = color_number_yellow + # case 5: magenta = red + blue + elif status_blue_led==True and status_green_led==False and status_red_led==True: + color = color_number_magenta + # case 6: cyan = green + blue + elif 
status_blue_led==True and status_green_led==True and status_red_led==False: + color = color_number_cyan + # case 7: white = red + green + blue + elif status_blue_led==True and status_green_led==True and status_red_led==True: + color = color_number_white + # case 8: led not active + # this case can not occur, because no inactive led can be detected from the implemented blob-algorithm in detect_LED_positions_in_grayscale + else: + color = color_number_off + + # fill matrix with color + led[2] = color # uint16 + + return matrix_of_LEDs + + +def detect_LEDs(image_bgr, detector): + # convert rgb to grayscale image + # start_m1 = time.perf_counter() + image_gray = convert_rgb_to_grayscale_average(image_bgr) + # end_m1 = time.perf_counter() + # time_processing = end_m1-start_m1 + # time_processing = time_processing*1000 + # time_processing = round(time_processing, 2) + # print(f'processing time conversion: {time_processing} ms') + + # get position of leds + position_of_LEDs = detect_position_of_all_LEDs_grayscale(image_gray=image_gray, image_bgr=image_bgr, detector=detector) + + #position_of_LEDs = None + + if position_of_LEDs is not None: + # determine color of leds and add to matrix + detected_LEDs = get_color_of_leds(position_of_LEDs, image_bgr) + return detected_LEDs + else: + return None + + +def lane_detection(image_bgr, detector): + # Detect LEDs + print(f"Detect LEDs and color:") + detected_LEDs = detect_LEDs(image_bgr, detector) + + if detected_LEDs is not None: + # Contruct lane + #print(f"_____________________________________") + # print("Contruct lane") + dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED = \ + construct_lane(detected_LEDs, image_bgr) + + # print result + if print_additional_info: + print(f"Detected LEDs relative to image-center(x0,y0):\n{detected_LEDs}") + return detected_LEDs + else: + return None + + + +# PiCamera +def get_frames_from_camera(detector): + # Initialise Camera + print('Initialise Camera...') + with picamera.PiCamera() as camera: + with PiRGBArray(camera) as output: + # Set camera settings + camera.sensor_mode = SENSOR_MODE # force camera into desired sensor mode + camera.resolution = OUTPUT_RESOLUTION # frame will be resized from GPU to this resolution. No CPU usage! + camera.framerate = FRAMERATE + + camera.awb_mode = AWB_MODE + camera.awb_gains = AWB_GAINS + + camera.iso = ISO + camera.shutter_speed = SHUTTER_SPEED + # it was found that, you have to set the right shutter speed at the first initalisation of the current runtime of the program. + # The gains (analog, digital) will adjust to this set up. + # After the gains are fixed, they will never change! even if you change the shutter speed during the runtime. + # To get consistent brightness values, set the right shutter speed at initalisation once. + + time.sleep(SLEEP_TIME) # wait for iso gains and digital_gain and analog_gain to settle before fixing the gains with exposure_mode = off + camera.exposure_mode = EXPOSURE_MODE + + time.sleep(1) # wait before applying brightness and contrast + camera.brightness = BRIGHTNESS + camera.contrast = CONTRAST + time.sleep(SLEEP_TIME) # Camera warm-up time to apply settings + + # camera.start_preview() # show camera preview through PiCamera interface + # camera.annotate_frame_num=True # Controls whether the current frame number is drawn as an annotation. 
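+            # (Editor's sketch, optional) once exposure_mode = 'off' has fixed the gains they
+            # can be verified via the PiCamera attributes, e.g.:
+            #   print(camera.analog_gain, camera.digital_gain, camera.exposure_speed)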
+ + print('Start caputure...') + + for frameidx, frame in enumerate(camera.capture_continuous(output, format='bgr', use_video_port=True)): + start_processing = time.perf_counter() + + framenumber = frameidx+1 # frameidx starts with 0, framenumber with 1 + image_bgr = frame.array # raw NumPy array without JPEG encoding + + #cv.imshow("Current Frame", image) # display the image without text + output.truncate(0) # clear the stream for next frame + + # processing + lane_detection(image_bgr, detector) + + + # Only uncomment following code if you display the image. No errors if not commented, but higher fps if commented. + # if q is pressed, break from loop. + pressed_key = cv.waitKey(2) & 0xff + if pressed_key == ord('q'): + break + end_processing = time.perf_counter() + time_processing = end_processing-start_processing + time_processing = time_processing*1000 + time_processing = round(time_processing, 2) + print(f'processing time: {time_processing} ms') + + + +# ---------------------------------------------------------------------------- +# main +def main(): + + # initialise parameters for blob detection once befor loop for performane + params_for_blob_detection = define_parameters_for_blob_detection() + detector = create_detector(params_for_blob_detection) + + # start capturing + get_frames_from_camera(detector) # start capture + + cv.destroyAllWindows() + print('Program finished') + + + +if __name__ == "__main__": + main() + + + + + + + + + diff --git a/90_ZielSW/02_With_Processing_Zielsoftware_findContours.py b/90_ZielSW/02_With_Processing_Zielsoftware_findContours.py new file mode 100644 index 0000000..f270af8 --- /dev/null +++ b/90_ZielSW/02_With_Processing_Zielsoftware_findContours.py @@ -0,0 +1,521 @@ +# Creation Date: 02.03.2022 +# Author: Kenan Gömek +# This code detects the lane with findContours. +# Quit program with 'q' if opencv windows is shows. Else use Ctrl+C. + + +import cv2 as cv +import picamera +from picamera.array import PiRGBArray +from fractions import Fraction + +import time +from datetime import datetime +import os + +import numpy as np +import math as M + +# Define camera settings +SENSOR_MODE = 4 # corresponding sensor mode to resolution 1640x1232 +# OUTPUT_RESOLUTION = (416, 320) # (1640x1232)/4=(410,308) --> needs to be divisible by 16 --> 416x320 +OUTPUT_RESOLUTION = (192, 144) + +AWB_MODE = 'off' # Auto white balance mode +AWB_GAINS = (1.395, 1.15) # White Balance Gains to have colours read correctly: (red, blue). Int, floar or fraction are valid. +BRIGHTNESS = 25 # sets the brightness setting of the camera. default is 50. [0-100] + #the brighter, the brighter the LEDs and the higher the RGB values and vice versa! +CONTRAST = 100 # sets the contrast setting of the camera. The default value is 0. [-100 ... 100] + +SHUTTER_SPEED = 50 # [µs] + +ISO = 320 # ISO value +EXPOSURE_MODE = 'off' +FRAMERATE = 25 # frames per second. 40 fps is max for sensor mode 4 + +SLEEP_TIME = 2 # Time for sleep-mode for the camera in seconds. 
My default: 2 s + +# Parameters +pixels_per_mm = 32/24.25 #[px/mm] for 120 mm camera height for resolution: 192x144 +# pixels_per_mm = 71/24.25 #[px/mm] for 120 mm camera height for resolution: 416x320 +# pixels_per_mm = 107/24.25 #[px/mm] for 120 mm camera height for resolution: 640x480 + +# Offset Camera Sensor in Scooty according to Scooty-KS +x_offset_camera_mm = 100 # [mm] +y_offset_camera_mm = 50 # [mm] +x_offset_camera_px = x_offset_camera_mm*pixels_per_mm # [px] +y_offset_camera_px = y_offset_camera_mm*pixels_per_mm # [px] + +# image parameters +image_height = OUTPUT_RESOLUTION[1] # shape [0] +image_width = OUTPUT_RESOLUTION[0]# shape[1] +# calculate center of image +[x_0, y_0] = np.array([image_width/2, image_height/2], dtype=np.uint16) + + +threshold_color_detection = 60 + # values under this will not be considered as active leds in each color channel + # see get_color_of_leds() + +# Parameters for Blob/LED Detection +minDiameter_mm = 3.75 # [mm] minimum diameter of detected blob/LED. Must be minimum >0 ! +# maxDiameter_mm = 9 # [mm] maximum diameter of detected blob/LED + +# Define color numbers to identify the color channels in the matrix with all detected LEDs. No string, because numpy array should stay uint16 +color_number_off = 0 +color_number_red = 1 +color_number_green = 2 +color_number_blue = 3 +color_number_yellow = 4 +color_number_magenta = 5 +color_number_cyan = 6 +color_number_white = 7 + +# Parameters for grayscale to binary conversion +binary_threshold = 15 # determined by testing and application + # the higher threshold is, the smaller the diameter of the led, because more high values are extracted +binary_maxval = 255 # values ofer threshold will be set to maxval + +# Parameters for line fitting for lane construction +param = 0 # not used for DIST_L2 +reps = 0.001 # Sufficient accuracy for the radius (distance between the coordinate origin and the line). +aeps = 0.001 # Sufficient accuracy for the angle. + +# Parameters for OpenCV +show_opencv_window = True # show opencv window +draw_opencv = True # draw lane and so on + +print_additional_info = True + +# calculations before start +# Filter blobs by Area --> not implemented anymore, because no need, because detection is good and less time for calculation needed +# more than good trade off! +minDiameter_px = minDiameter_mm*pixels_per_mm # [px] minimum diameter of detected blob/LED +# maxDiameter_px = maxDiameter_mm*pixels_per_mm # [px] maximum diameter of detected blob/LED +minArea_px2 = np.pi/4*minDiameter_px**2 # min Area of a blob in px^2 +# maxArea_px2 = np.pi/4*maxDiameter_px**2 +# minArea = minArea_px2 # min Area of a blob in px^2 +# params.maxArea = maxArea_px2 # max Area of a blob in px^2. +# reasons for not filtering maxArea: motion blur + rolling shutter --> larger Area + + +def points_trafo(detected_LEDs, alpha_rad, dx, dy): + """Tranfsform points of LED to lane in KS-LED""" + detected_LEDs_trafo = detected_LEDs.copy() # copy, becuase else it is only a pointer + detected_LEDs_trafo = detected_LEDs_trafo.astype(np.int16) # avoid integer overflow + x_pnts = detected_LEDs_trafo[:,0] + y_pnts = detected_LEDs_trafo[:,1] + + # Translation + x1 = x_pnts-dx-x_0 + x_trafo = x1 + y1 = y_pnts-dy-y_0 + y_trafo = y1 + + # Rotation. 
Winkel Sensor im UZS, also negativ zu mathematischer definiton + x_trafo = np.cos(-alpha_rad)*x1-np.sin(-alpha_rad)*y1 + detected_LEDs_trafo[:,0] = x_trafo + y_trafo = np.sin(-alpha_rad)*x1+np.cos(-alpha_rad)*y1 + detected_LEDs_trafo[:,1] = y_trafo + + #sort points along lane: x_2, y_2 -axis (KS_LED) + detected_LEDs_trafo = detected_LEDs_trafo[detected_LEDs_trafo[:, 0].argsort(kind='quicksort')] + return detected_LEDs_trafo + +def construct_lane(detected_LEDs, img_bgr): + """construct the lane""" + # This function is partially commented in german, because higher math is used + # clearer what is trying to be achieved + + # get points + # xy_pnts = detected_LEDs[:,0:2] + # x_pnts = detected_LEDs[:,0] + # y_pnts = detected_LEDs[:,1] + + # approach 2: + # fit line through centers of LEDs in KS_0 + # DIST_L": the simplest and the fastest least-squares method: the simple euclidean distance + [dx, dy, x_2, y_2] = cv.fitLine(detected_LEDs[:,0:2], cv.DIST_L2, param, reps, aeps) + # x2, y2: same as: mean_of_leds = np.mean([x_pnts, y_pnts], 1) + + alpha_rad = np.arctan2(dy, dx) # calculate angle of line + alpha = np.arctan2(dy, dx)*180/np.pi # calculate angle of line + # print(f"Lane: dx: {dx}, dy:{dy}, x2:{x_2}, y2:{y_2}, alpha:{alpha}°") + if print_additional_info: + alpha_print = alpha[0] + alpha_print = float("{0:.2f}".format(alpha_print)) + print(f"Alpha: {alpha_print}°") + + # get smallest distance to point an line + # Berechnung nach: Repetitorium Höhere Mathematik, Wirth + # Gerade: x = a+ t*b + # Punkt : OP = p + # d = abs(b x (p-a))/(abs(b)) + # info: np.array()[:,0] --> gets only array with 1 dimensions with desired values + p = np.array([x_0, y_0]) + a = np.array([x_2, y_2])[:,0] + b = np.array([np.cos(alpha_rad), np.sin(alpha_rad)])[:,0] # Richtungsvektor + c = p-a + + # Betrag von Vektor: np.linalg.norm(vec) + cross= np.cross(b, c) + d = np.linalg.norm(cross)/np.linalg.norm(b) # distance [px] + #print(f"d: {round(d,2)}") + + # Fußpunkt (X_LED, Y_LED) + t_0_dot = np.dot(c, b) + t_0_norm = (np.linalg.norm(b)**2) + t_0 = t_0_dot/t_0_norm + [x_LED, y_LED] = (a+t_0*b) + if print_additional_info: + # convert float32, round and prepare for printing string + x_LED_print = float("{0:.2f}".format(x_LED)) + y_LED_print = float("{0:.2f}".format(y_LED)) + print(f"x_LED: {x_LED_print} [px], y_LED: {y_LED_print} [px]") + + # Abstand (dx, dy) Fußpunkt zu KS_0 + dx_LED = x_LED - x_0 + dx_LED_mm = dx_LED*(1/pixels_per_mm) + dy_LED = y_LED - y_0 + dy_LED_mm = dy_LED*(1/pixels_per_mm) + if print_additional_info: + # convert float32, round and prepare for printing string + dx_LED_print = float("{0:.2f}".format(dx_LED)) + dy_LED_print = float("{0:.2f}".format(dy_LED)) + dx_LED_mm_print = float("{0:.2f}".format(dx_LED_mm)) + dy_LED_mm_print = float("{0:.2f}".format(dy_LED_mm)) + print(f"dx_LED: {dx_LED_print} [px] , dy_LED: {dy_LED_print} [px]") + print(f"dx_LED: {dx_LED_mm_print} [mm] , dy_LED: {dy_LED_mm_print} [mm]") + + # Abstand (dx, dy) Fußpunkt von Bildmitte zu KS_Scooty + # Diese Werte zurückgeben + dx_LED_scooty = x_LED - x_0 + x_offset_camera_px + dx_LED_scooty_mm = dx_LED_scooty*(1/pixels_per_mm) + dy_LED_scooty = y_LED - y_0 + y_offset_camera_px + dy_LED_scooty_mm = dy_LED_scooty*(1/pixels_per_mm) + if print_additional_info: + print(f"dx_LED_scooty: {round(dx_LED_scooty,2)} [px] , dy_LED_scooty: {round(dy_LED_scooty,2)} [px]") + print(f"dx_LED_scooty: {round(dx_LED_scooty_mm,2)} [mm] , dy_LED_scooty: {round(dy_LED_scooty_mm,2)} [mm]") + + # Punkte Trafo, um sortierte position der LEDs entlang Spur 
zu erhalten + # Bei normal detected kann bei vertikaler LED zb Fehler entstehen und dann muster: 211323233 -> daher mit dieser sortierten weitermachen + detected_LEDs_KS_LED = points_trafo(detected_LEDs, alpha_rad, dx_LED, dy_LED) + if print_additional_info: + print(f"Detected LEDs in KS_LED:(x2, y2):\n {detected_LEDs_KS_LED}") + + #----------------------------------- + # draw useful lines and points + + # draw lane line + if draw_opencv: + pt_0 = (a+b*np.array([-300, -300])).astype(np.int32) + pt_1 = (a+b*np.array([300, 300])).astype(np.int32) + #print(f"pt_0: {pt_0}, pt_1: {pt_1}") + cv.line(img_bgr, pt_0, pt_1, (255,255,255),1) # draw lane + + # draw dx dy + cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_LED)), (0,0,255), 2) # shortest distance from KS_0 to KS_LED --> Lot + # cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_0)), (0,0,255), 2) # only dx + # cv.line(img_bgr, (int(x_LED), int(y_0)), (int(x_LED), int(y_LED)), (0,0,255), 2) # only dy + + #draw additional points + cv.circle(img_bgr, (int(x_2), int(y_2)), 5,(255,128,255),-1) #pink. Center of points + #cv.putText(img_bgr, '(x2, y2)',(int(x_2)+5, int(y_2)-5), cv.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), cv.LINE_AA) + cv.circle(img_bgr, (int(x_LED), int(y_LED)), 5,(170,255,0),-1) # lime green. Fußpunkt + + if show_opencv_window: + cv.imshow("Lane", img_bgr) + + return dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED + +def convert_rgb_to_grayscale_average(image_bgr): + """This function converts the RGB image into an grayscale image. + Algorithm: Average: Y = (R+G+B)/3""" + + # convert dtype to prevent integer overflow while addition + image_bgr = image_bgr.astype(np.uint16, copy=False) + image_gray = (image_bgr[:,:,0]+image_bgr[:,:,1]+image_bgr[:,:,2])/3 # add values / do conversion + image_gray = image_gray.astype(np.uint8, copy=False) # convert back to uint8 + + return image_gray + + +def get_color_of_leds(matrix_of_LEDs, image_bgr): + """Determine color of LEDs at positions and add to matrix""" + # is image_r[y_pos, x_pos] = image_bgr[y_pos,x_pos, 2] ? --> yes. No need to split the color channels. 
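+    # (Editor's sketch) quick check of the equivalence claimed above:
+    #   b, g, r = cv.split(image_bgr)
+    #   assert np.array_equal(r, image_bgr[:, :, 2])  # cv.split copies each channel, so
+    #   # r[y, x] == image_bgr[y, x, 2] for every pixel; direct indexing avoids the copies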
+ + offset = 1 # half of length from rectangle which is going to be used to determine the color around the middle point of the blob/led + # offset = 0 --> only the value from the middle point of the blob/led + # offset=1 --> 9 values, offset=2-->25 values + + for led in matrix_of_LEDs: + x_pos = led[0] # uint16 + y_pos = led[1] # uint16 + + # get values of color channels in region around middle point of blob/led: + # +1 at stop index, because it is not inclusive + region_around_blue_led = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, 0] # uint8 + region_around_green_led = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, 1] # uint8 + region_around_red_led = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, 2] # uint8 + + # average of the values + # convert dtype to prevent integer overflow while addition + region_around_red_led = region_around_red_led.astype(np.uint16, copy=False) + region_around_green_led = region_around_green_led.astype(np.uint16, copy=False) + region_around_blue_led = region_around_blue_led.astype(np.uint16, copy=False) + # sum all elements in matrix and divide with number of elements + number_of_elements= region_around_blue_led.size + value_of_red_led = region_around_red_led.sum()/number_of_elements # float64, if not integer result + value_of_green_led = region_around_green_led.sum()/number_of_elements # float64, if not integer result + value_of_blue_led = region_around_blue_led.sum()/number_of_elements # float64, if not integer result + + # determine which leds are active: + # if value > threshold --> led is active + status_blue_led = False; status_green_led = False; status_red_led = False + if value_of_blue_led > threshold_color_detection: + status_blue_led = True + if value_of_green_led > threshold_color_detection: + status_green_led = True + if value_of_red_led > threshold_color_detection: + status_red_led = True + + # determine color by checking the cases: + # case 1: red + if status_blue_led==False and status_green_led==False and status_red_led==True: + color = color_number_red + # case 2: green + elif status_blue_led==False and status_green_led==True and status_red_led==False: + color = color_number_green + # case 3: blue + elif status_blue_led==True and status_green_led==False and status_red_led==False: + color = color_number_blue + # case 4: yellow = red + green + elif status_blue_led==False and status_green_led==True and status_red_led==True: + color = color_number_yellow + # case 5: magenta = red + blue + elif status_blue_led==True and status_green_led==False and status_red_led==True: + color = color_number_magenta + # case 6: cyan = green + blue + elif status_blue_led==True and status_green_led==True and status_red_led==False: + color = color_number_cyan + # case 7: white = red + green + blue + elif status_blue_led==True and status_green_led==True and status_red_led==True: + color = color_number_white + # case 8: led not active + # this case can not occur, because no inactive led can be detected from the implemented blob-algorithm in detect_LED_positions_in_grayscale + else: + color = color_number_off + + # fill matrix with color + led[2] = color # uint16 + + return matrix_of_LEDs + +def get_position_of_LEDs_contours(image_gray, image_bgr): + # create binary image + ret, image_binary = cv.threshold(image_gray, binary_threshold, binary_maxval, cv.THRESH_BINARY) + + # find contours + contour_retrieval_algorithm = cv.RETR_EXTERNAL # retrieves only the extreme outer contours + contours, hierarchy = 
cv.findContours(image_binary, contour_retrieval_algorithm, cv.CHAIN_APPROX_SIMPLE) + + # analyse contours + number_of_detected_contours = len(contours) + if number_of_detected_contours != 0: + # centroid of contours + # Pre-allocate matrix for numpy + number_of_rows = 0 + number_of_columns = 3 + position_of_leds = np.zeros((number_of_rows, number_of_columns), dtype=np.uint16) #empty: [] + + if draw_opencv: + image_bgr_contours = image_bgr.copy() # create copy of image + # copy is needed to draw on, because else the color of + # the circle is detected als LED-Color + + number_of_detected_LEDs = 0 + for i, cnt in enumerate(contours): + M = cv.moments(cnt) + area = cv.contourArea(cnt) + # diameter = 2*np.sqrt(area/np.pi) + # diameter_mm = diameter*(1/pixels_per_mm) + # print(f"area: {area} [px^2], diameter: {diameter} [px], diamter: {diameter_mm} [mm]") + + # Filter contours by area. minimum Area needs to be at least >0 ! + if area >minArea_px2: + number_of_detected_LEDs += 1 + # prevent zero division + if M['m00']==0: + cx = 0 + cy = 0 + else: + cx = int(M['m10']/M['m00']) + cy = int(M['m01']/M['m00']) + #print(cx, cy) + # add positions to matrix + x_pos = int(cx) # x positon + y_pos = int(cy) # y position + position_of_leds = np.vstack((position_of_leds, \ + np.array([x_pos, y_pos, color_number_off], dtype=np.uint16))) # vstack: row wise + + # draw centroids + if draw_opencv: + radius = 2 + color = (255,255,255) + thickness = -1 # filled + cv.circle(image_bgr_contours,(cx,cy), radius, color, thickness) + + if number_of_detected_LEDs != 0: + if print_additional_info: + print(f"detected LEDs: {number_of_detected_LEDs}") + + if draw_opencv: + # draw contours + contours_to_pass = -1 # pass all contours + color_of_contour = (255,255,255) + line_thickness = 1 + cv.drawContours(image_bgr_contours, contours, contours_to_pass, color_of_contour, line_thickness) + + if show_opencv_window: + cv.imshow("binary", image_binary) + cv.imshow("Contours", image_bgr_contours) + + return position_of_leds + else: + if print_additional_info: + print(f"No LEDs were detected") + return None + + else: + if print_additional_info: + print(f"No contours were detected") + return None + +def detect_position_of_LEDs(image_bgr): + + # convert rgb to grayscale + image_gray = convert_rgb_to_grayscale_average(image_bgr) + if show_opencv_window: + cv.imshow("grayscale", image_gray) + + # get position of leds + position_of_LEDs = get_position_of_LEDs_contours(image_gray, image_bgr) + # position_of_LEDs = None + + return position_of_LEDs + + + +def lane_detection(image_bgr): + # Detect LEDs + if print_additional_info: + print(f"Detect LEDs and color:") + position_of_LEDs = detect_position_of_LEDs(image_bgr) + # detected_LEDs = None # only that following code is not being executet for development + + # Get color of leds + if position_of_LEDs is not None: + detected_LEDs = get_color_of_leds(position_of_LEDs, image_bgr) + # print result + if print_additional_info: + print(f"Detected LEDs relative to image center (x0,y0):\n{detected_LEDs}") + else: + detected_LEDs = None + + + # Contruct lane + if detected_LEDs is not None: + if print_additional_info: + print("\nContruct lane with consideration of camera offset:") + dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED = \ + construct_lane(detected_LEDs, image_bgr) + return detected_LEDs + else: + return None + + + +# PiCamera +def get_frames_from_camera(): + # Initialise Camera + print('Initialise Camera...') + with picamera.PiCamera() as camera: + with PiRGBArray(camera) as 
output:
+            # Set camera settings
+            camera.sensor_mode = SENSOR_MODE # force camera into desired sensor mode
+            camera.resolution = OUTPUT_RESOLUTION # frame will be resized from GPU to this resolution. No CPU usage!
+            camera.framerate = FRAMERATE
+
+            camera.awb_mode = AWB_MODE
+            camera.awb_gains = AWB_GAINS
+
+            camera.iso = ISO
+            camera.shutter_speed = SHUTTER_SPEED
+            # It was found that you have to set the right shutter speed at the first initialisation of the current runtime of the program.
+            # The gains (analog, digital) will adjust to this setup.
+            # After the gains are fixed, they will never change, even if you change the shutter speed during the runtime.
+            # To get consistent brightness values, set the right shutter speed at initialisation once.
+
+            time.sleep(SLEEP_TIME) # wait for iso gains, digital_gain and analog_gain to settle before fixing the gains with exposure_mode = off
+            camera.exposure_mode = EXPOSURE_MODE
+
+            time.sleep(1) # wait before applying brightness and contrast
+            camera.brightness = BRIGHTNESS
+            camera.contrast = CONTRAST
+            time.sleep(SLEEP_TIME) # camera warm-up time to apply settings
+
+            # camera.start_preview() # show camera preview through PiCamera interface
+            # camera.annotate_frame_num=True # controls whether the current frame number is drawn as an annotation
+
+            print('Start capture...')
+
+            for frameidx, frame in enumerate(camera.capture_continuous(output, format='bgr', use_video_port=True)):
+                start_processing = time.perf_counter()
+
+                framenumber = frameidx+1 # frameidx starts with 0, framenumber with 1
+                image_bgr = frame.array # raw NumPy array without JPEG encoding
+
+                #cv.imshow("Current Frame", image) # display the image without text
+                output.truncate(0) # clear the stream for next frame
+
+                # processing
+                lane_detection(image_bgr)
+
+
+                # Only needed if you display the image: nothing breaks if it stays active, but fps is higher if it is commented out.
+                # If q is pressed, break from the loop.
+                pressed_key = cv.waitKey(2) & 0xff
+                if pressed_key == ord('q'):
+                    break
+                end_processing = time.perf_counter()
+                time_processing = end_processing-start_processing
+                time_processing = time_processing*1000
+                time_processing = round(time_processing, 2)
+                print(f'processing time: {time_processing} ms')
+
+
+
+# ----------------------------------------------------------------------------
+# main
+def main():
+
+    # start capturing
+    get_frames_from_camera() # start capture
+
+    cv.destroyAllWindows()
+    print('Program finished')
+
+
+if __name__ == "__main__":
+    main()
+
+
+
+
+
+
+
+
diff --git a/90_ZielSW/02_With_Processing_Zielsoftware_findContours_Erkennung.xlsx b/90_ZielSW/02_With_Processing_Zielsoftware_findContours_Erkennung.xlsx
new file mode 100644
index 0000000..5818cf4
Binary files /dev/null and b/90_ZielSW/02_With_Processing_Zielsoftware_findContours_Erkennung.xlsx differ
diff --git a/90_ZielSW/Final/PAP.pdf b/90_ZielSW/Final/PAP.pdf
new file mode 100644
index 0000000..ec05d8c
Binary files /dev/null and b/90_ZielSW/Final/PAP.pdf differ
diff --git a/90_ZielSW/Final/Spurerkennung.py b/90_ZielSW/Final/Spurerkennung.py
new file mode 100644
index 0000000..2007620
--- /dev/null
+++ b/90_ZielSW/Final/Spurerkennung.py
@@ -0,0 +1,522 @@
+# Creation Date: 02.03.2022
+# Author: Kenan Gömek
+# This code detects the lane.
+# Quit the program with 'q' if an OpenCV window is shown, else use Ctrl+C.
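+#
+# Processing pipeline implemented below (one frame at a time):
+#   BGR frame -> grayscale (average) -> binary threshold -> contours (cv.findContours)
+#   -> LED centroids + colours -> line fit (cv.fitLine) -> lane offsets dx, dy in [mm]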
+
+import cv2 as cv
+import picamera
+from picamera.array import PiRGBArray
+from fractions import Fraction
+
+import time
+from datetime import datetime
+import os
+
+import numpy as np
+import math as M
+
+# Define camera settings
+SENSOR_MODE = 4 # corresponding sensor mode to resolution 1640x1232
+OUTPUT_RESOLUTION = (192, 144)
+
+AWB_MODE = 'off' # Auto white balance mode
+AWB_GAINS = (1.395, 1.15) # White Balance Gains to have colours read correctly: (red, blue). Int, float or fraction are valid.
+BRIGHTNESS = 25 # sets the brightness setting of the camera. default is 50. [0-100]
+    # the higher the brightness setting, the brighter the LEDs and the higher the RGB values and vice versa!
+CONTRAST = 100 # sets the contrast setting of the camera. The default value is 0. [-100 ... 100]
+
+SHUTTER_SPEED = 50 # [µs]
+
+ISO = 320 # ISO value
+EXPOSURE_MODE = 'off'
+FRAMERATE = 25 # frames per second. 40 fps is max for sensor mode 4
+
+SLEEP_TIME = 2 # Time for sleep-mode for the camera in seconds. My default: 2 s
+
+# Parameters
+pixels_per_mm = 32/24.25 # [px/mm] for 120 mm camera height for resolution: 192x144
+
+# Offset Camera Sensor in Scooty according to Scooty-KS
+x_offset_camera_mm = 0 # [mm]
+y_offset_camera_mm = 0 # [mm]
+x_offset_camera_px = x_offset_camera_mm*pixels_per_mm # [px]
+y_offset_camera_px = y_offset_camera_mm*pixels_per_mm # [px]
+
+# image parameters
+image_height = OUTPUT_RESOLUTION[1]
+image_width = OUTPUT_RESOLUTION[0]
+# calculate center of image
+[x_0, y_0] = np.array([image_width/2, image_height/2], dtype=np.uint16)
+
+
+threshold_color_detection = 60
+    # values (average) under this will not be considered as active leds in each color channel
+    # see get_color_of_leds()-function
+
+# Parameters for Blob/LED Detection
+minDiameter_mm = 3.75 # [mm] minimum diameter of detected blob/LED. Must be > 0!
+
+# Define color numbers to identify the color channels in the matrix with all detected LEDs.
+# No string, because numpy array should stay uint16
+color_number_off = 0
+color_number_red = 1
+color_number_green = 2
+color_number_blue = 3
+color_number_yellow = 4
+color_number_magenta = 5
+color_number_cyan = 6
+color_number_white = 7
+
+# Parameters for grayscale to binary conversion
+binary_threshold = 15 # determined by testing and application
+    # the higher the threshold, the smaller the diameter of the LED, because only the higher values are extracted
+binary_maxval = 255 # values over the threshold will be set to maxval
+
+# Parameters for line fitting for lane construction
+param = 0 # not used for DIST_L2
+reps = 0.001 # sufficient accuracy for the radius (distance between the coordinate origin and the line)
+aeps = 0.001 # sufficient accuracy for the angle
+
+# Parameters for OpenCV
+# OpenCV windows are only for demonstration
+# They are not needed for functionality
+show_opencv_window = True # show opencv window
+draw_opencv = True # draw lane and so on
+
+print_additional_info = True
+
+# calculations before start
+# Filtering blobs by maximum area is not implemented anymore: detection is good without it
+# and skipping it saves computation time. More than a good trade-off!
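+# Worked example for the minimum-area threshold computed below (values from above,
+# rounded, for illustration only):
+#   minDiameter_px = 3.75 mm * (32/24.25) px/mm ≈ 4.95 px
+#   minArea_px2    = pi/4 * 4.95^2             ≈ 19.2 px^2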
+minDiameter_px = minDiameter_mm*pixels_per_mm # [px] minimum diameter of detected blob/LED
+minArea_px2 = np.pi/4*minDiameter_px**2 # min area of a blob in px^2
+
+
+def points_trafo(detected_LEDs, alpha_rad, dx, dy):
+    """Transform LED points to the lane coordinate system KS-LED."""
+    detected_LEDs_trafo = detected_LEDs.copy() # copy, because otherwise it is only a reference
+    detected_LEDs_trafo = detected_LEDs_trafo.astype(np.int16) # avoid integer overflow
+    x_pnts = detected_LEDs_trafo[:, 0]
+    y_pnts = detected_LEDs_trafo[:, 1]
+
+    # Translation
+    x1 = x_pnts-dx-x_0
+    x_trafo = x1
+    y1 = y_pnts-dy-y_0
+    y_trafo = y1
+
+    # Rotation. The sensor angle is clockwise, i.e. negative with respect to the mathematical definition
+    x_trafo = np.cos(-alpha_rad)*x1-np.sin(-alpha_rad)*y1
+    detected_LEDs_trafo[:, 0] = x_trafo
+    y_trafo = np.sin(-alpha_rad)*x1+np.cos(-alpha_rad)*y1
+    detected_LEDs_trafo[:, 1] = y_trafo
+
+    # sort points along lane: x_2, y_2 -axis (KS_LED)
+    detected_LEDs_trafo = detected_LEDs_trafo[detected_LEDs_trafo[:, 0].argsort(kind='quicksort')]
+    return detected_LEDs_trafo
+
+
+def construct_lane(detected_LEDs, img_bgr):
+    """Construct the lane from the detected LEDs."""
+    # Higher mathematics is used below; the comments state the formulas
+    # so it is clearer what is being achieved.
+
+    # get points
+    # xy_pnts = detected_LEDs[:,0:2]
+    # x_pnts = detected_LEDs[:,0]
+    # y_pnts = detected_LEDs[:,1]
+
+    # approach 2:
+    # fit line through centers of LEDs in KS_0
+    # DIST_L2: the simplest and the fastest least-squares method: the simple euclidean distance
+    [dx, dy, x_2, y_2] = cv.fitLine(detected_LEDs[:, 0:2], cv.DIST_L2, param, reps, aeps)
+    # x2, y2: same as: mean_of_leds = np.mean([x_pnts, y_pnts], 1)
+
+    alpha_rad = np.arctan2(dy, dx) # calculate angle of line
+    alpha = np.arctan2(dy, dx)*180/np.pi # calculate angle of line
+    # print(f"Lane: dx: {dx}, dy:{dy}, x2:{x_2}, y2:{y_2}, alpha:{alpha}°")
+    if print_additional_info:
+        alpha_print = alpha[0]
+        alpha_print = float("{0:.2f}".format(alpha_print))
+        print(f"Alpha: {alpha_print}°")
+
+    # get smallest distance between point and line
+    # calculation based on: Repetitorium Höhere Mathematik, Wirth
+    # line:  x = a + t*b
+    # point: OP = p
+    # d = abs(b x (p-a))/(abs(b))
+    # info: np.array()[:,0] --> gets only array with 1 dimension with desired values
+    p = np.array([x_0, y_0])
+    a = np.array([x_2, y_2])[:, 0]
+    b = np.array([np.cos(alpha_rad), np.sin(alpha_rad)])[:, 0] # direction vector
+    c = p-a
+
+    # magnitude of a vector: np.linalg.norm(vec)
+    cross = np.cross(b, c)
+    d = np.linalg.norm(cross)/np.linalg.norm(b) # distance [px]
+    # print(f"d: {round(d,2)}")
+
+    # foot of perpendicular (x_LED, y_LED)
+    t_0_dot = np.dot(c, b)
+    t_0_norm = (np.linalg.norm(b)**2)
+    t_0 = t_0_dot/t_0_norm
+    [x_LED, y_LED] = (a+t_0*b)
+    if print_additional_info:
+        # convert float32, round and prepare for printing string
+        x_LED_print = float("{0:.2f}".format(x_LED))
+        y_LED_print = float("{0:.2f}".format(y_LED))
+        print(f"x_LED: {x_LED_print} [px], y_LED: {y_LED_print} [px]")
+
+    # distance (dx, dy) from the foot of perpendicular to KS_0
+    dx_LED = x_LED - x_0
+    dx_LED_mm = dx_LED*(1/pixels_per_mm)
+    dy_LED = y_LED - y_0
+    dy_LED_mm = dy_LED*(1/pixels_per_mm)
+    if print_additional_info:
+        # convert float32, round and prepare for printing string
+        dx_LED_print = float("{0:.2f}".format(dx_LED))
+        dy_LED_print = float("{0:.2f}".format(dy_LED))
+        dx_LED_mm_print = float("{0:.2f}".format(dx_LED_mm))
+        dy_LED_mm_print = float("{0:.2f}".format(dy_LED_mm))
+        print(f"dx_LED: {dx_LED_print} [px] , dy_LED: {dy_LED_print} [px]")
+        print(f"dx_LED: {dx_LED_mm_print} [mm] , dy_LED: {dy_LED_mm_print} [mm]")
+
+    # distance (dx, dy) of the foot of perpendicular from the image centre to KS_Scooty
+    # these values are returned
+    dx_LED_scooty = x_LED - x_0 + x_offset_camera_px
+    dx_LED_scooty_mm = dx_LED_scooty*(1/pixels_per_mm)
+    dy_LED_scooty = y_LED - y_0 + y_offset_camera_px
+    dy_LED_scooty_mm = dy_LED_scooty*(1/pixels_per_mm)
+    if print_additional_info:
+        print(f"dx_LED_scooty: {round(dx_LED_scooty,2)} [px] , dy_LED_scooty: {round(dy_LED_scooty,2)} [px]")
+        print(f"dx_LED_scooty: {round(dx_LED_scooty_mm,2)} [mm] , dy_LED_scooty: {round(dy_LED_scooty_mm,2)} [mm]")
+
+    # transform points to get the sorted position of the LEDs along the lane
+    # with the normal detection order, a vertical LED row can produce errors and patterns like 211323233 -> therefore continue with this sorted order
+    detected_LEDs_KS_LED = points_trafo(detected_LEDs, alpha_rad, dx_LED, dy_LED)
+    if print_additional_info:
+        print(f"Detected LEDs in KS_LED:(x2, y2):\n {detected_LEDs_KS_LED}")
+
+    #-----------------------------------
+    # draw useful lines and points
+
+    # draw lane line
+    if draw_opencv:
+        pt_0 = (a+b*np.array([-300, -300])).astype(np.int32)
+        pt_1 = (a+b*np.array([300, 300])).astype(np.int32)
+        # print(f"pt_0: {pt_0}, pt_1: {pt_1}")
+        line_color = (255, 255, 255)
+        line_thickness = 1
+        cv.line(img_bgr, pt_0, pt_1, line_color, line_thickness) # draw lane
+
+        # draw dx dy
+        line_color = (0, 0, 255)
+        line_thickness = 2
+        cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_LED)), line_color, line_thickness) # shortest distance from KS_0 to KS_LED --> perpendicular
+        # cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_0)), (0, 0, 255), 2) # only dx
+        # cv.line(img_bgr, (int(x_LED), int(y_0)), (int(x_LED), int(y_LED)), (0, 0, 255), 2) # only dy
+
+        # draw additional points
+        circle_radius = 5
+        circle_color = (255, 128, 255) # pink
+        circle_thickness = -1 # filled
+        cv.circle(img_bgr, (int(x_2), int(y_2)), circle_radius, circle_color, circle_thickness) # center of points
+        # cv.putText(img_bgr, '(x2, y2)',(int(x_2)+5, int(y_2)-5), cv.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), cv.LINE_AA)
+        circle_color = (170, 255, 0) # lime green
+        cv.circle(img_bgr, (int(x_LED), int(y_LED)), circle_radius, circle_color, circle_thickness) # foot of perpendicular
+
+    if show_opencv_window:
+        cv.imshow("Lane", img_bgr)
+
+    return dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED
+
+
+def convert_rgb_to_grayscale_average(image_bgr):
+    """This function converts the RGB image into a grayscale image.
+    Algorithm: Average: Y = (R+G+B)/3"""
+
+    # convert dtype to prevent integer overflow during addition
+    image_bgr = image_bgr.astype(np.uint16, copy=False)
+    image_gray = (image_bgr[:, :, 0]+image_bgr[:, :, 1]+image_bgr[:, :, 2])/3 # add values / do conversion
+    image_gray = image_gray.astype(np.uint8, copy=False) # convert back to uint8
+
+    return image_gray
+
+
+def get_color_of_leds(matrix_of_LEDs, image_bgr):
+    """Determine color of LEDs at positions and add to matrix"""
+    # is image_r[y_pos, x_pos] = image_bgr[y_pos, x_pos, 2]? --> yes. No need to split the color channels.
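+    # Colour decision table used below (active channels -> colour number):
+    #   R -> 1 (red),  G -> 2 (green),  B -> 3 (blue),  R+G -> 4 (yellow),
+    #   R+B -> 5 (magenta),  G+B -> 6 (cyan),  R+G+B -> 7 (white),  none -> 0 (off)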
+ + offset = 1 # half of length from rectangle which is going to be used to determine the color around the middle point of the blob/led + # offset = 0 --> only the value from the middle point of the blob/led + # offset=1 --> 9 values, offset=2-->25 values + + for led in matrix_of_LEDs: + x_pos = led[0] # uint16 + y_pos = led[1] # uint16 + + # get values of color channels in region around middle point of blob/led: + # +1 at stop index, because it is not inclusive + region_around_blue_led = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, 0] # uint8 + region_around_green_led = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, 1] # uint8 + region_around_red_led = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, 2] # uint8 + + # average of the values + # convert dtype to prevent integer overflow while addition + region_around_red_led = region_around_red_led.astype(np.uint16, copy=False) + region_around_green_led = region_around_green_led.astype(np.uint16, copy=False) + region_around_blue_led = region_around_blue_led.astype(np.uint16, copy=False) + # sum all elements in matrix and divide with number of elements + number_of_elements = region_around_blue_led.size + value_of_red_led = region_around_red_led.sum()/number_of_elements # float64, if not integer result + value_of_green_led = region_around_green_led.sum()/number_of_elements # float64, if not integer result + value_of_blue_led = region_around_blue_led.sum()/number_of_elements # float64, if not integer result + + # determine which leds are active: + # if value > threshold --> led is active + status_blue_led = False + status_green_led = False + status_red_led = False + if value_of_blue_led > threshold_color_detection: + status_blue_led = True + if value_of_green_led > threshold_color_detection: + status_green_led = True + if value_of_red_led > threshold_color_detection: + status_red_led = True + + # determine color by checking the cases: + # case 1: red + if status_blue_led==False and status_green_led==False and status_red_led==True: + color = color_number_red + # case 2: green + elif status_blue_led==False and status_green_led==True and status_red_led==False: + color = color_number_green + # case 3: blue + elif status_blue_led==True and status_green_led==False and status_red_led==False: + color = color_number_blue + # case 4: yellow = red + green + elif status_blue_led==False and status_green_led==True and status_red_led==True: + color = color_number_yellow + # case 5: magenta = red + blue + elif status_blue_led==True and status_green_led==False and status_red_led==True: + color = color_number_magenta + # case 6: cyan = green + blue + elif status_blue_led==True and status_green_led==True and status_red_led==False: + color = color_number_cyan + # case 7: white = red + green + blue + elif status_blue_led==True and status_green_led==True and status_red_led==True: + color = color_number_white + # case 8: led not active + # this case can not occur, because no inactive led can be detected from the implemented blob-algorithm in detect_LED_positions_in_grayscale + else: + color = color_number_off + + # fill matrix with color + led[2] = color # uint16 + + return matrix_of_LEDs + +def get_position_of_LEDs_contours(image_gray, image_bgr): + # create binary image + ret, image_binary = cv.threshold(image_gray, binary_threshold, binary_maxval, cv.THRESH_BINARY) + + # find contours + contour_retrieval_algorithm = cv.RETR_EXTERNAL # retrieves only the extreme outer contours + contours, hierarchy = 
cv.findContours(image_binary, contour_retrieval_algorithm, cv.CHAIN_APPROX_SIMPLE) + + # analyse contours + number_of_detected_contours = len(contours) + if number_of_detected_contours != 0: + # centroid of contours + # Pre-allocate matrix for numpy + number_of_rows = 0 + number_of_columns = 3 + position_of_leds = np.zeros((number_of_rows, number_of_columns), dtype=np.uint16) #empty: [] + + if draw_opencv: + image_bgr_contours = image_bgr.copy() # create copy of image + # copy is needed to draw on, because else the color of + # the circle is detected als LED-Color + + number_of_detected_LEDs = 0 + for i, cnt in enumerate(contours): + M = cv.moments(cnt) + area = cv.contourArea(cnt) + # diameter = 2*np.sqrt(area/np.pi) + # diameter_mm = diameter*(1/pixels_per_mm) + # print(f"area: {area} [px^2], diameter: {diameter} [px], diamter: {diameter_mm} [mm]") + + # Filter contours by area. minimum Area needs to be at least >0 ! + if area >minArea_px2: + number_of_detected_LEDs += 1 + # prevent zero division + if M['m00']==0: + cx = 0 + cy = 0 + else: + cx = int(M['m10']/M['m00']) + cy = int(M['m01']/M['m00']) + #print(cx, cy) + # add positions to matrix + x_pos = int(cx) # x positon + y_pos = int(cy) # y position + position_of_leds = np.vstack((position_of_leds, \ + np.array([x_pos, y_pos, color_number_off], dtype=np.uint16))) # vstack: row wise + + # draw centroids + if draw_opencv: + radius = 2 + color = (255,255,255) + thickness = -1 # filled + cv.circle(image_bgr_contours,(cx,cy), radius, color, thickness) + + if number_of_detected_LEDs != 0: + if print_additional_info: + print(f"detected LEDs: {number_of_detected_LEDs}") + + if draw_opencv: + # draw contours + contours_to_pass = -1 # pass all contours + color_of_contour = (255,255,255) + line_thickness = 1 + cv.drawContours(image_bgr_contours, contours, contours_to_pass, color_of_contour, line_thickness) + + if show_opencv_window: + cv.imshow("binary", image_binary) + cv.imshow("Contours", image_bgr_contours) + + return position_of_leds + else: + if print_additional_info: + print(f"No LEDs were detected") + return None + + else: + if print_additional_info: + print(f"No contours were detected") + return None + +def detect_position_of_LEDs(image_bgr): + + # convert rgb to grayscale + image_gray = convert_rgb_to_grayscale_average(image_bgr) + if show_opencv_window: + cv.imshow("grayscale", image_gray) + + # get position of leds + position_of_LEDs = get_position_of_LEDs_contours(image_gray, image_bgr) + + return position_of_LEDs + + + +def lane_detection(image_bgr): + # Detect LEDs + if print_additional_info: + print(f"Detect LEDs and color:") + position_of_LEDs = detect_position_of_LEDs(image_bgr) + # detected_LEDs = None # only that following code is not being executet for development + + # Get color of leds + if position_of_LEDs is not None: + detected_LEDs = get_color_of_leds(position_of_LEDs, image_bgr) + # print result + if print_additional_info: + print(f"Detected LEDs relative to image center (x0,y0):\n{detected_LEDs}") + else: + detected_LEDs = None + + + # Contruct lane + if detected_LEDs is not None: + if print_additional_info: + print("\nContruct lane with consideration of camera offset:") + dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED = \ + construct_lane(detected_LEDs, image_bgr) + return detected_LEDs + else: + return None + + + +# PiCamera +def get_frames_from_camera(): + # Initialise Camera + print('Initialise Camera...') + with picamera.PiCamera() as camera: + with PiRGBArray(camera) as output: + # Set camera settings 
+            camera.sensor_mode = SENSOR_MODE # force camera into desired sensor mode
+            camera.resolution = OUTPUT_RESOLUTION # frame will be resized from GPU to this resolution. No CPU usage!
+            camera.framerate = FRAMERATE
+
+            camera.awb_mode = AWB_MODE
+            camera.awb_gains = AWB_GAINS
+
+            camera.iso = ISO
+            camera.shutter_speed = SHUTTER_SPEED
+            # It was found that you have to set the right shutter speed at the first initialisation of the current runtime of the program.
+            # The gains (analog, digital) will adjust to this setup.
+            # After the gains are fixed, they will never change, even if you change the shutter speed during the runtime.
+            # To get consistent brightness values, set the right shutter speed at initialisation once.
+
+            time.sleep(SLEEP_TIME) # wait for iso gains, digital_gain and analog_gain to settle before fixing the gains with exposure_mode = off
+            camera.exposure_mode = EXPOSURE_MODE
+
+            time.sleep(1) # wait before applying brightness and contrast
+            camera.brightness = BRIGHTNESS
+            camera.contrast = CONTRAST
+            time.sleep(SLEEP_TIME) # camera warm-up time to apply settings
+
+            # camera.start_preview() # show camera preview through PiCamera interface
+            # camera.annotate_frame_num=True # controls whether the current frame number is drawn as an annotation
+
+            print('Start capture...')
+
+            for frameidx, frame in enumerate(camera.capture_continuous(output, format='bgr', use_video_port=True)):
+                start_processing = time.perf_counter()
+
+                framenumber = frameidx+1 # frameidx starts with 0, framenumber with 1
+                image_bgr = frame.array # raw NumPy array without JPEG encoding
+
+                #cv.imshow("Current Frame", image) # display the image without text
+                output.truncate(0) # clear the stream for next frame
+
+                # processing
+                lane_detection(image_bgr)
+
+
+                # Only needed if you display the image: nothing breaks if it stays active, but fps is higher if it is commented out.
+                # If q is pressed, break from the loop.
+                pressed_key = cv.waitKey(2) & 0xff
+                if pressed_key == ord('q'):
+                    break
+                end_processing = time.perf_counter()
+                time_processing = end_processing-start_processing
+                time_processing = time_processing*1000
+                time_processing = round(time_processing, 2)
+                print(f'processing time: {time_processing} ms')
+
+
+
+# ----------------------------------------------------------------------------
+# main
+def main():
+
+    # start capturing
+    get_frames_from_camera() # start capture
+
+    if show_opencv_window:
+        cv.destroyAllWindows() # destroy all OpenCV windows
+
+if __name__ == "__main__":
+    main()
+
+
+
+
+
+
+
+
+
diff --git a/90_ZielSW/Multiprocessing/Lanedetection_Picamera_V01.py b/90_ZielSW/Multiprocessing/Lanedetection_Picamera_V01.py
new file mode 100644
index 0000000..707dea5
--- /dev/null
+++ b/90_ZielSW/Multiprocessing/Lanedetection_Picamera_V01.py
@@ -0,0 +1,912 @@
+
+import cv2 as cv
+import numpy as np
+
+import picamera
+from picamera.array import PiRGBArray
+from fractions import Fraction
+
+import time
+from datetime import datetime
+import os
+import sys
+
+import math as M
+
+from multiprocessing import Process, shared_memory
+
+# Parameters
+pixels_per_mm = 71/24.25 # [px/mm] for 120 mm camera height for resolution: 416x320
+# pixels_per_mm = 107/24.25 # [px/mm] for 120 mm camera height for resolution: 640x480
+
+# Offset Camera Sensor in Scooty according to Scooty-KS
+x_offset_camera_mm = 100 # [mm]
+y_offset_camera_mm = 50 # [mm]
+x_offset_camera_px = x_offset_camera_mm*pixels_per_mm # [px]
+y_offset_camera_px = y_offset_camera_mm*pixels_per_mm # [px]
+
+# image parameters
+image_heigth = 320 # shape [0]
+image_width = 416 # shape [1]
+# calculate center of image
+[x_0, y_0] = np.array([image_width/2, image_heigth/2], dtype=np.uint16)
+
+
+threshold_color_detection = 60 # values under this will not be considered as active leds in each color channel
+
+# Parameters for Blob/LED Detection
+minDiameter_mm = 5 # [mm] minimum diameter of detected blob/LED
+maxDiameter_mm = 9 # [mm] maximum diameter of detected blob/LED
+
+# Define color numbers to identify the color channels in the matrix with all detected LEDs. No string, because numpy array should stay uint16
+color_number_off = 0
+color_number_red = 1
+color_number_green = 2
+color_number_blue = 3
+color_number_yellow = 4
+color_number_magenta = 5
+color_number_cyan = 6
+color_number_white = 7
+
+#----------------------------------------------------------------------
+# Define camera settings
+SENSOR_MODE = 4 # corresponding sensor mode to resolution
+OUTPUT_RESOLUTION = (320, 416) # (width, heigth)
+image_width = OUTPUT_RESOLUTION[0]
+image_heigth = OUTPUT_RESOLUTION[1]
+number_of_colorchannels = 3 # r, g, b
+size_of_frame = int(image_heigth*image_width*number_of_colorchannels) # height*width*channels
+frame_dimension = int(number_of_colorchannels)
+
+AWB_MODE = 'off' # Auto white balance mode
+AWB_GAINS = (1.395, 1.15) # White Balance Gains to have colours read correctly: (red, blue). Int, float or fraction are valid.
+BRIGHTNESS = 25 # sets the brightness setting of the camera. default is 50. [0-100]
+    # the higher the brightness setting, the brighter the LEDs and the higher the RGB values and vice versa!
+CONTRAST = 100 # sets the contrast setting of the camera. The default value is 0. [-100 ... 100]
+
+SHUTTER_SPEED = 50 # [µs]
+
+ISO = 320 # ISO value
+EXPOSURE_MODE = 'off'
+FRAMERATE = 25 # frames per second. 40 fps is max for sensor mode 4
+
+SLEEP_TIME = 2 # Time for sleep-mode for the camera in seconds.
My default: 2 s + +# miscellaneous parameters +max_value_of_uint64 = int((2**64) - 1) # @30 fps: konservative calculated driving time: 1.95*1e10 years --> Integer overflow not relevant. + +# settings for development +show_opencv_window = False + +# create shared Memorys for main-process +# shared memory for bools +shm_bools_pre=np.array([False, False, False, False, False, False, False, False, False, False], dtype=np.bool8) # create numpy array with bools stored in it + # [0]: newframe [1]: p_red_finished [2]: p_green_finished [3]: p_blue_finished + # [4]: p_red_started [5]: p_green_started [6]: p_blue_started + # [7]: p_red_start_trigger [8]: p_green_start_trigger [9]: p_blue_start_trigger +size_of_buffer = shm_bools_pre.nbytes +print(f"size of buffer: {size_of_buffer}") # size of buffer: 10 +print(f"shm_bools dtype: {shm_bools_pre.dtype}") # dtype: bool +shm_bools_create = shared_memory.SharedMemory(name="shm_bools", create=True, size=shm_bools_pre.nbytes) # create a new shared memory block +shm_bools = np.ndarray(shm_bools_pre.shape, dtype=shm_bools_pre.dtype, buffer=shm_bools_create.buf) # create a NumPy array backed by shared memory +shm_bools[:] = shm_bools_pre[:] # Copy the original data into shared memory + +# print(shm_bool) +# print(shm_bools.name) + +# shared memory for framenumber +shm_framenumber_pre=np.array([0], dtype=np.uint64) +size_of_buffer = shm_framenumber_pre.nbytes +print(f"size of framenumber-buffer: {size_of_buffer}") #8 +print(f"shm_framenumber dtype: {shm_framenumber_pre.dtype}") #uint64 +shm_framenumber_create = shared_memory.SharedMemory(name="shm_framenumber", create=True, size=shm_framenumber_pre.nbytes) # create a new shared memory block +shm_framenumber = np.ndarray(shm_framenumber_pre.shape, dtype=shm_framenumber_pre.dtype, buffer=shm_framenumber_create.buf) # create a NumPy array backed by shared memory +shm_framenumber[:] = shm_framenumber_pre[:] # Copy the original data into shared memory +# print(shm_framenumber) # [0] +# print(shm_framenumber_create.name) # shm_framenumber + +# shared memory for red, green, blue frame +int_black = 0 # integer for black color/ no color +shm_colorframes_pre = np.full(\ + (image_heigth,image_width), \ + int_black, dtype=np.uint8) +size_of_buffer = shm_colorframes_pre.nbytes +print(f"size of colorframe-buffer: {size_of_buffer}") #133 120 +print(f"shm_colorframes_pre dtype: {shm_colorframes_pre.dtype}") #uint8 +shm_redframe_create = shared_memory.SharedMemory(name="shm_redframe", create=True, size=shm_colorframes_pre.nbytes) # create a new shared memory block +shm_greenframe_create = shared_memory.SharedMemory(name="shm_greenframe", create=True, size=shm_colorframes_pre.nbytes) # create a new shared memory block +shm_blueframe_create = shared_memory.SharedMemory(name="shm_blueframe", create=True, size=shm_colorframes_pre.nbytes) # create a new shared memory block +shm_redframe = np.ndarray(shm_colorframes_pre.shape, dtype=shm_colorframes_pre.dtype, buffer=shm_redframe_create.buf) # create a NumPy array backed by shared memory +shm_greenframe = np.ndarray(shm_colorframes_pre.shape, dtype=shm_colorframes_pre.dtype, buffer=shm_greenframe_create.buf) # create a NumPy array backed by shared memory +shm_blueframe = np.ndarray(shm_colorframes_pre.shape, dtype=shm_colorframes_pre.dtype, buffer=shm_blueframe_create.buf) # create a NumPy array backed by shared memory +shm_redframe[:] = shm_colorframes_pre[:] # Copy the original data into shared memory +shm_greenframe[:] = shm_colorframes_pre[:] # Copy the original data into shared 
memory +shm_blueframe[:] = shm_colorframes_pre[:] # Copy the original data into shared memory +# shared memory bgr frame +int_black = 0 # integer for black color/ no color +shm_frame_pre = np.full(\ + (image_heigth,image_width, number_of_colorchannels), \ + int_black, dtype=np.uint8) +size_of_buffer = shm_frame_pre.nbytes +print(f"size of colorframe-buffer: {size_of_buffer}") #399 360 = 416 x 320 x 3 +print(f"shm_colorframes_pre dtype: {shm_frame_pre.dtype}") #uint8 +shm_frame_create = shared_memory.SharedMemory(name="shm_frame", create=True, size=shm_frame_pre.nbytes) # create a new shared memory block +shm_frame = np.ndarray(shm_frame_pre.shape, dtype=shm_frame_pre.dtype, buffer=shm_frame_create.buf) # create a NumPy array backed by shared memory +shm_frame[:] = shm_frame_pre[:] # Copy the original data into shared memory + + +# ---------------------------------------------------------------------------- +# Define Functions +def points_trafo(detected_LEDs, alpha_rad, dx, dy): + """Tranfsform points of LED to lane in KS-LED""" + detected_LEDs_trafo = detected_LEDs.copy() # copy, becuase else it is only a pointer + detected_LEDs_trafo = detected_LEDs_trafo.astype(np.int16) # avoid integer overflow + x_pnts = detected_LEDs_trafo[:,0] + y_pnts = detected_LEDs_trafo[:,1] + + # Translation + x1 = x_pnts-dx-x_0 + x_trafo=x1 + y1 = y_pnts-dy-y_0 + y_trafo = y1 + + # Rotation. Winkel Sensor im UZS, also negativ zu mathematischer definiton + x_trafo = np.cos(-alpha_rad)*x1-np.sin(-alpha_rad)*y1 + detected_LEDs_trafo[:,0] = x_trafo + y_trafo = np.sin(-alpha_rad)*x1+np.cos(-alpha_rad)*y1 + detected_LEDs_trafo[:,1] = y_trafo + + #sort points along lane: x_2, y_2 -axis (KS_LED) + detected_LEDs_trafo = detected_LEDs_trafo[detected_LEDs_trafo[:, 0].argsort(kind='quicksort')] + return detected_LEDs_trafo + +def construct_lane(detected_LEDs, img_bgr): + """construct the lane""" + # This function is partially commented in german, because higher math is used + # clearer what is trying to be achieved + + # get points + # xy_pnts = detected_LEDs[:,0:2] + # x_pnts = detected_LEDs[:,0] + # y_pnts = detected_LEDs[:,1] + + # approach 2: + # fit line through centers of LEDs in KS_0 + # DIST_L": the simplest and the fastest least-squares method: the simple euclidean distance + param = 0 # not used for DIST_L2 + reps = 0.001 # Sufficient accuracy for the radius (distance between the coordinate origin and the line). + aeps = 0.001 # Sufficient accuracy for the angle. 
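+    # Reminder of the cv.fitLine output format used below: it returns a normalised
+    # direction vector (dx, dy) and one point (x_2, y_2) on the fitted line.
+    # E.g. for LEDs lying on the horizontal row y = 50 one would get approximately
+    # dx = 1, dy = 0, x_2 = <mean of the x values>, y_2 = 50 (sketch, not measured output).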
+ [dx, dy, x_2, y_2] = cv.fitLine(detected_LEDs[:,0:2], cv.DIST_L2, param, reps, aeps) + # x2, y2: same as: mean_of_leds = np.mean([x_pnts, y_pnts], 1) + + alpha_rad = np.arctan2(dy, dx) # calculate angle of line + alpha = np.arctan2(dy, dx)*180/np.pi # calculate angle of line + # print(f"Lane: dx: {dx}, dy:{dy}, x2:{x_2}, y2:{y_2}, alpha:{alpha}°") + print(f"Lane: alpha:{alpha[0]}°") + + # get smallest distance to point an line + # Berechnung nach: Repetitorium Höhere Mathematik, Wirth + # Gerade: x = a+ t*b + # Punkt : OP = p + # d = abs(b x (p-a))/(abs(b)) + # info: np.array()[:,0] --> gets only array with 1 dimensions with desired values + p = np.array([x_0, y_0]) + a = np.array([x_2, y_2])[:,0] + b = np.array([np.cos(alpha_rad), np.sin(alpha_rad)])[:,0] # Richtungsvektor + c = p-a + + # Betrag von Vektor: np.linalg.norm(vec) + cross= np.cross(b, c) + d = np.linalg.norm(cross)/np.linalg.norm(b) # distance [px] + #print(f"d: {round(d,2)}") + + # Fußpunkt (X_LED, Y_LED) + t_0_dot = np.dot(c, b) + t_0_norm = (np.linalg.norm(b)**2) + t_0 = t_0_dot/t_0_norm + [x_LED, y_LED] = (a+t_0*b) + #print(f"x_LED: {x_LED}, y_LED: {y_LED}") + + # Abstand (dx, dy) Fußpunkt zu KS_0 + dx_LED = x_LED - x_0 + dx_LED_mm = dx_LED*(1/pixels_per_mm) + dy_LED = y_LED - y_0 + dy_LED_mm = dy_LED*(1/pixels_per_mm) + #print(f"dx_LED:{dx_LED} [px] , dy_LED:{dy_LED} [px]") + #print(f"dx_LED:{dx_LED_mm} [mm] , dy_LED:{dy_LED_mm} [mm]") + + # Abstand (dx, dy) Fußpunkt von Bildmitte zu KS_Scooty + # Diese Werte zurückgeben + dx_LED_scooty = x_LED - x_0 + x_offset_camera_px + dx_LED_scooty_mm = dx_LED_scooty*(1/pixels_per_mm) + dy_LED_scooty = y_LED - y_0 + y_offset_camera_px + dy_LED_scooty_mm = dy_LED_scooty*(1/pixels_per_mm) + #print(f"dx_LED_scooty:{dx_LED_scooty} [px] , dy_LED_scooty:{dy_LED_scooty} [px]") + #print(f"dx_LED_scooty:{dx_LED_scooty_mm} [mm] , dy_LED_scooty:{dy_LED_scooty_mm} [mm]") + + # Punkte Trafo, um sortierte position der LEDs entlang Spur zu erhalten + # Bei normal detected kann bei vertikaler LED zb Fehler entstehen und dann muster: 211323233 -> daher mit dieser sortierten weitermachen + detected_LEDs_KS_LED = points_trafo(detected_LEDs, alpha_rad, dx_LED, dy_LED) + #print(f"Detected LEDs in KS_LED:(x2, y2):\n {detected_LEDs_KS_LED}") + + #----------------------------------- + # draw useful lines and points + + # draw lane line + pt_0 = (a+b*np.array([-300, -300])).astype(np.int32) + pt_1 = (a+b*np.array([300, 300])).astype(np.int32) + #print(f"pt_0: {pt_0}, pt_1: {pt_1}") + cv.line(img_bgr, pt_0, pt_1, (255,255,255),1) # draw lane + + # draw dx dy + cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_LED)), (0,0,255), 2) # shortest distance from KS_0 to KS_LED --> Lot + # cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_0)), (0,0,255), 2) # only dx + # cv.line(img_bgr, (int(x_LED), int(y_0)), (int(x_LED), int(y_LED)), (0,0,255), 2) # only dy + + #draw additional points + cv.circle(img_bgr, (int(x_2), int(y_2)), 5,(255,128,255),-1) #pink. Center of points + #cv.putText(img_bgr, '(x2, y2)',(int(x_2)+5, int(y_2)-5), cv.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), cv.LINE_AA) + cv.circle(img_bgr, (int(x_LED), int(y_LED)), 5,(170,255,0),-1) # lime green. Fußpunkt + + #cv.imshow("Lane", img_bgr) + + return dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED + +def convert_rgb_to_grayscale_average(image_b, image_g, image_r): + """This function converts the RGB image into an grayscale image. 
+ Algorithm: Average: Y = (R+G+B)/3""" + + # convert dtype to prevent integer overflow while addition + image_r = image_r.astype(np.uint16) + image_g = image_g.astype(np.uint16) + image_b = image_b.astype(np.uint16) + + image_gray = (image_b+image_g+image_r)/3 # add values / do conversion + image_gray = image_gray.astype(np.uint8) # convert back to uint8 + + return image_gray + +def define_parameters_for_blob_detection(): + """set parameters for simple blob detector""" + params = cv.SimpleBlobDetector_Params() + + # Threshold for Convert the source image to binary images by applying thresholding + # with several thresholds from minThreshold (inclusive) to maxThreshold (exclusive) + # with distance thresholdStep between neighboring thresholds. + # Since the Grayscale image is dark if only one color channel is active, + # the Threshold values have to be set like this. + # particularly the thresholdStep-Value has to be low + params.minThreshold=20 # reminder: this value is set for grayscale image + params.maxThreshold=255 + params.thresholdStep=1 + + params.filterByColor=False # do not filter blobs by color + + # Filter blobs by Area + params.filterByArea=False + minDiameter_px = minDiameter_mm*pixels_per_mm # [px] minimum diameter of detected blob/LED + maxDiameter_px = maxDiameter_mm*pixels_per_mm # [px] maximum diameter of detected blob/LED + minArea_px2 = np.pi/4*minDiameter_px**2 + maxArea_px2 = np.pi/4*maxDiameter_px**2 + + params.minArea = minArea_px2 # min Area of a blob in px^2 + # params.maxArea = maxArea_px2 # max Area of a blob in px^2. + # reasons for not filtering maxArea: motion blur + rolling shutter --> larger Area + + # Filter by Inertia + params.filterByInertia=False + params.minInertiaRatio = 0.2 # [0-1] + + # Filter by Convexity + params.filterByConvexity=False + params.minConvexity = 0.2 # [0-1] + + # Filter by Circularity + params.filterByCircularity=False + params.minCircularity = 0.4 # [0-1] + + # params.minDistBetweenBlobs = minDist_px # this has no effect + + return params + +def detect_LED_positions_in_grayscale(image_gray, image_bgr, params_for_blob_detection): + + detector = cv.SimpleBlobDetector_create(params_for_blob_detection) # Set up the detector with specified parameters. 
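+    # Note on cost (assumption based on the parameters set above): with minThreshold=20,
+    # maxThreshold=255 and thresholdStep=1 the detector internally thresholds the image
+    # at roughly 235 levels and merges the resulting blobs, which makes detect()
+    # comparatively expensive per frame.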
+ keypoints = detector.detect(image_gray) # Detect blobs --> LEDs + number_of_detected_leds = len(keypoints) + + if number_of_detected_leds != 0: + # print information of keypoints + print(f"detected LEDs: {number_of_detected_leds}") + + #Pre-allocate matrix for numpy + number_of_rows = number_of_detected_leds + number_of_columns = 3 + position_of_leds = np.zeros((number_of_rows, number_of_columns), dtype=np.uint16) + for i, k in enumerate(keypoints): + x_pos = round(k.pt[0],0) # x position + y_pos = round(k.pt[1],0) # y position + # print(f"x: {x_pos} y: {y_pos}") + diameter_px = round(k.size,2) + diameter_mm = round(diameter_px*1/pixels_per_mm,2) + # print(f"diameter [px]: {diameter_px} diameter [mm]: {diameter_mm}") # diameter + area_px2 = round(M.pi/4*k.size**2,0) # area in px^2 + area_mm2 = round(area_px2*(1/pixels_per_mm)**2,0) + # print(f"area [px^2]: {area_px2} area [mm^2]: {area_mm2}") + # print('') + + # calculate parameters to transfer to matrix + x_pos = int(np.ceil(x_pos)) + y_pos = int(np.ceil(y_pos)) + # Fill matrix + # position_of_leds[i,:] = [x_pos,y_pos, 0] + position_of_leds[i,0] = x_pos + position_of_leds[i,1] = y_pos + + + # draw the keypoints on the original image + # cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob + blobs = cv.drawKeypoints(image=image_bgr, keypoints=keypoints, color=(255, 255, 255), \ + outImage=np.array([]), flags= cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) + + # cv.imshow("grayscale", image_gray) + #cv.imshow("Detected", blobs) + + return position_of_leds + + else: + print(f"No LEDs were detected") + return None + +def detect_position_of_all_LEDs_grayscale(image_gray, image_bgr, params_for_blob_detection): + position_of_LEDs = detect_LED_positions_in_grayscale(image_gray, image_bgr, params_for_blob_detection) + + if position_of_LEDs is not None: + return position_of_LEDs + else: + return None + +def get_color_of_leds(matrix_of_LEDs, image_bgr): + # is image_r[y_pos, x_pos] = image_bgr[y_pos,x_pos, 2] ? --> yes. No need to split the color channels. 
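+    # Example of the sampling window defined below: with offset = 2 the slice
+    # image_bgr[y-2:y+3, x-2:x+3, c] covers a 5x5 window, i.e. 25 values per channel.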
+ + offset = 2 # half of length from rectangle which is going to be used to determine the color around the middle point of the blob/led + # offset = 0 --> only the value from the middle point of the blob/led + # offset=1 --> 9 values, offset=2-->25 values + + for led in matrix_of_LEDs: + x_pos = led[0] # uint16 + y_pos = led[1] # uint16 + + # get values of color channels in region around middle point of blob/led: + # +1 at stop index, because it is not inclusive + region_around_blue_led = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, 0] # uint8 + region_around_green_led = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, 1] # uint8 + region_around_red_led = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, 2] # uint8 + + # average of the values + # convert dtype to prevent integer overflow while addition + region_around_red_led = region_around_red_led.astype(np.uint16) + region_around_green_led = region_around_green_led.astype(np.uint16) + region_around_blue_led = region_around_blue_led .astype(np.uint16) + # sum all elements in matrix and divide with number of elements + number_of_elements= region_around_blue_led.size + value_of_red_led = region_around_red_led.sum()/number_of_elements # float64, if not integer result + value_of_green_led = region_around_green_led.sum()/number_of_elements # float64, if not integer result + value_of_blue_led = region_around_blue_led.sum()/number_of_elements # float64, if not integer result + + # determine which leds are active: + # if value > threshold --> led is active + status_blue_led = False; status_green_led = False; status_red_led = False + if value_of_blue_led > threshold_color_detection: + status_blue_led = True + if value_of_green_led > threshold_color_detection: + status_green_led = True + if value_of_red_led > threshold_color_detection: + status_red_led = True + + # determine color by checking the cases: + # case 1: red + if status_blue_led==False and status_green_led==False and status_red_led==True: + color = color_number_red + # case 2: green + elif status_blue_led==False and status_green_led==True and status_red_led==False: + color = color_number_green + # case 3: blue + elif status_blue_led==True and status_green_led==False and status_red_led==False: + color = color_number_blue + # case 4: yellow = red + green + elif status_blue_led==False and status_green_led==True and status_red_led==True: + color = color_number_yellow + # case 5: magenta = red + blue + elif status_blue_led==True and status_green_led==False and status_red_led==True: + color = color_number_magenta + # case 6: cyan = green + blue + elif status_blue_led==True and status_green_led==True and status_red_led==False: + color = color_number_cyan + # case 7: white = red + green + blue + elif status_blue_led==True and status_green_led==True and status_red_led==True: + color = color_number_white + # case 8: led not active + # this case can not occur, because no inactive led can be detected from the implemented blob-algorithm in detect_LED_positions_in_grayscale + else: + color = color_number_off + + # fill matrix with color + led[2] = color # uint16 + + return matrix_of_LEDs + + +def detect_LEDs_with_grayscale(image_b, image_g, image_r, image_bgr, params_for_blob_detection): + # convert rgb to grayscale image + start_m1 = time.perf_counter_ns() + image_gray = convert_rgb_to_grayscale_average(image_b, image_g, image_r) + end_m1 = time.perf_counter_ns() + time_processing = end_m1-start_m1 + time_processing = time_processing*1e-6 + 
time_processing = round(time_processing, 2) + print(f'processing time convert image: {time_processing} ms') + + + # get position of leds + position_of_LEDs = detect_position_of_all_LEDs_grayscale(image_gray=image_gray, image_bgr=image_bgr, params_for_blob_detection=params_for_blob_detection) + + if position_of_LEDs is not None: + # determine color of leds and add to matrix + detected_LEDs = get_color_of_leds(position_of_LEDs, image_bgr) + return detected_LEDs + else: + return None + +def lane_detection(image_bgr, params_for_blob_detection): + start_m1 = time.perf_counter_ns() + image_b,image_g,image_r = cv.split(image_bgr) # Split colour channels and get grayscale images + end_m1 = time.perf_counter_ns() + time_processing = end_m1-start_m1 + time_processing = time_processing*1e-6 + time_processing = round(time_processing, 2) + print(f'processing time split image: {time_processing} ms') + + + # Detect LEDs + print(f"Detect LEDs and color:") + detected_LEDs = detect_LEDs_with_grayscale(image_b, image_g, image_r, image_bgr, params_for_blob_detection) + + if detected_LEDs is not None: + # Contruct lane + #print(f"_____________________________________") + #print("Contruct lane") + dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED = \ + construct_lane(detected_LEDs, image_bgr) + + # print result + #print(f"Detected LEDs relative to image-center(x0,y0):\n{detected_LEDs}") + return detected_LEDs + else: + return None +#------------------- +# Define Funcions +def get_frames_from_picamera(): + # newframe= shm_bools[0] # do not use this! no updted values in "newframe" + # framenumber = shm_framenumber[0] + + # Initialise Camera + with picamera.PiCamera() as camera: + with PiRGBArray(camera) as output: + # Set camera settings + camera.sensor_mode = SENSOR_MODE # force camera into desired sensor mode + camera.resolution = OUTPUT_RESOLUTION # frame will be resized from GPU to this resolution. No CPU usage! + camera.framerate = FRAMERATE + + camera.awb_mode = AWB_MODE + camera.awb_gains = AWB_GAINS + + camera.iso = ISO + camera.shutter_speed = SHUTTER_SPEED + + time.sleep(SLEEP_TIME) # wait for iso gains and digital_gain and analog_gain to settle before fixing the gains with exposure_mode = off + camera.exposure_mode = EXPOSURE_MODE + + time.sleep(1) # wait before applying brightness and contrast + camera.brightness = BRIGHTNESS + camera.contrast = CONTRAST + time.sleep(SLEEP_TIME) # Camera warm-up time to apply settings + + t_start= time.perf_counter() # save time for fps calculation + + for frameidx, frame in enumerate(camera.capture_continuous(output, format='bgr', use_video_port=True)): + # General information: + # - always the newest frame is recieved: processing must be faster than fps if every frame should be processed + + shm_bools[0] = True + framenumber = frameidx+1 # frameidx starts with 0, framenumber with 1 + + + shm_framenumber[0] = framenumber + #print('') + #print(f"new frame: {framenumber}") + + #image = frame.array # raw NumPy array without JPEG encoding + + #b,g,r = cv.split(frame.array) # split colour channels of raw NumPy array without JPEG encoding + shm_frame[:] = frame.array + #shm_redframe[:] = r + #shm_greenframe[:] = g + #shm_blueframe[:] = b + # for better performance one can assign directly in funtion line the values to the shm_memorys: shm_red, .. , ... = cv.split(..) 
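+                # Sketch of that direct assignment (assumes the shared arrays keep the
+                # (image_heigth, image_width) layout created above):
+                #   shm_blueframe[:], shm_greenframe[:], shm_redframe[:] = cv.split(frame.array)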
+
+                shm_bools[7:10] = [True] # trigger the start of the processing for each colorchannel
+                #print(shm_bools[7], shm_bools[8], shm_bools[9])
+
+                #display_image_with_text(image, shutter_speed, framenumber, camera_exposure_speed, trigger_record_OpenCV, out) # show the frame
+
+                output.truncate(0) # clear the stream for next frame
+
+                if framenumber == 500: # 5 s @ 30 fps, only for performance measuring
+                    t_stop = time.perf_counter()
+                    print(f"calculated fps: {framenumber/(t_stop-t_start)}")
+                    break
+
+
+def display_image_with_text(img, shutter_speed, framenumber, camera_exposure_speed, trigger_record_OpenCV, out):
+    img = img.copy() # make a copy of the image and do not modify the original image
+
+    # please activate only one trigger at a time
+    trigger_show_brightness = 0 # trigger for (not) calculating and showing the brightness of the image
+    if trigger_show_brightness == 1:
+        arithmetic_mean_of_brightness_per_pixel_relative = calc_arithmetic_mean_of_brightness_per_pixel(img)
+
+    trigger_show_max_brightness_values_of_colour_channels = 0 # trigger for (not) calculating and showing max values of colour channels
+    if trigger_show_max_brightness_values_of_colour_channels == 1:
+        r_max, g_max, b_max = get_max_rgb_values(img)
+
+    font = cv.FONT_HERSHEY_SIMPLEX # font
+    fontScale = 1 # fontScale
+    color = (255, 255, 255) # font colour in BGR
+    thickness = 1 # line thickness in px
+
+    # set text position
+    frame_width = int(img.shape[1])
+    frame_height = int(img.shape[0])
+    text_start_position_Y = int(round(frame_height*0.12)) # start position of text in pixels, 12 % of frame height
+    text_linespacing = 50 # line spacing between two strings in pixels
+    # text_start_position_X = int(frame_width/4) # start text from 1/4 of image width
+    text_start_position_X = int(0) # start text from left edge of image
+
+    # set position in (x,y)-coordinates from the top left corner. Bottom-left corner of the text string in the image.
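+    # Worked example (assuming a 320 px high frame): text_start_position_Y =
+    # round(320*0.12) = 38, so pos_1 = (0, 38), pos_2 = (0, 88), pos_3 = (0, 138).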
+    pos_1 = (text_start_position_X, text_start_position_Y) # (x, y) position of text line 1
+    pos_2 = (text_start_position_X, text_start_position_Y+text_linespacing) # (x, y) position of text line 2
+    pos_3 = (text_start_position_X, text_start_position_Y+2*text_linespacing) # (x, y) position of text line 3
+    if trigger_show_brightness==1 or trigger_show_max_brightness_values_of_colour_channels==1:
+        pos_4 = (text_start_position_X, text_start_position_Y+3*text_linespacing) # (x, y) position of text line 4
+
+
+    # define text to display
+    text_line_1 = f"set ss: {shutter_speed} us"
+    text_line_3 = f"Frame: {framenumber}"
+    text_line_2 = f"ret exs: {camera_exposure_speed} us"
+    if trigger_show_brightness==1:
+        if arithmetic_mean_of_brightness_per_pixel_relative >= 0.01:
+            text_line_4 = f"brightness: {round(arithmetic_mean_of_brightness_per_pixel_relative*100,2)} %"
+        elif arithmetic_mean_of_brightness_per_pixel_relative < 0.01:
+            text_line_4 = f"brightness: {round(arithmetic_mean_of_brightness_per_pixel_relative*10e3,2)} pm"
+    if trigger_show_max_brightness_values_of_colour_channels==1:
+        text_line_4 = f"max: r:{r_max} g:{g_max} b:{b_max}"
+
+
+    # put the text into the image
+    image_text_1 = cv.putText(img, text_line_1, pos_1, font,
+                              fontScale, color, thickness, cv.LINE_AA)
+    image_text_2 = cv.putText(img, text_line_2, pos_2, font,
+                              fontScale, color, thickness, cv.LINE_AA)
+    image_text_3 = cv.putText(img, text_line_3, pos_3, font,
+                              fontScale, color, thickness, cv.LINE_AA)
+    if trigger_show_brightness==1 or trigger_show_max_brightness_values_of_colour_channels==1:
+        image_text_4 = cv.putText(img, text_line_4, pos_4, font,
+                                  fontScale, color, thickness, cv.LINE_AA)
+
+
+    cv.imshow("Current Frame", img) # display the image
+    if trigger_record_OpenCV == 1:
+        out.write(img) # write frame to video
+
+
+def calc_arithmetic_mean_of_brightness_per_pixel(image):
+    """Calculate the overall brightness per pixel of the image (mean brightness per pixel)."""
+    # Comment: so computationally intensive that the flashing is not visible in the image
+    # (or only rarely and weakly). Therefore the max values are displayed instead.
+
+    b,g,r = cv.split(image) # OpenCV works with bgr. Image is also specified to be captured in bgr.
+    r = r.astype('uint16') # set dtype of one colour to uint16, because the sum of 255+255+255 = 765 > 255
+    # the rest will also be uint16 then
+    image_heigth = r.shape[0]
+    image_width = r.shape[1]
+
+    number_of_colour_channels = 3
+    arithmetic_mean_of_brightness_image = np.sum((r+g+b)/number_of_colour_channels)
+    arithmetic_mean_of_brightness_per_pixel = arithmetic_mean_of_brightness_image/(image_width*image_heigth)
+
+    max_possible_brightness = 255 # maximum possible brightness
+    arithmetic_mean_of_brightness_per_pixel_relative = arithmetic_mean_of_brightness_per_pixel/max_possible_brightness
+
+    return arithmetic_mean_of_brightness_per_pixel_relative
+
+def get_max_rgb_values(image):
+    """get max values of colour channels"""
+    b,g,r = cv.split(image) # OpenCV works with bgr. Image is also specified to be captured in bgr.
+
+    r_max = r.max()
+    g_max = g.max()
+    b_max = b.max()
+
+    return r_max, g_max, b_max
+
+def create_folder_for_captures():
+    # Create folder for saving the captured pictures
+    now = datetime.now(); d1 = now.strftime("%Y-%m-%d %H-%M")
+    path_cwd = os.getcwd()
+
+    path_saveFolder = path_cwd+r"/Capture_"+d1
+    try:
+        os.mkdir(path_saveFolder)
+        folder_exists = True
+    except OSError:
+        print("Error!
Ending script.") + quit() + + return path_saveFolder, folder_exists + +def do_processing(): + time.sleep(0.001) + #print("ohh i am doing high complex image analysis with computer vision") + +def do_processing_frame_r(frame): + print(f"max frame color red: {frame.max()}") +def do_processing_frame_g(frame): + print(f"max frame color green: {frame.max()}") +def do_processing_frame_b(frame): + print(f"max frame color blue: {frame.max()}") + + + +def processing_red(): + shm_bool_init = shared_memory.SharedMemory(name="shm_bools") # Attach to existing shared memory block + shm_bools = np.ndarray((10,), dtype=np.bool8, buffer= shm_bool_init.buf) # do not: newframe = np.array(...)[0] --> then one can not assign new value in main script + newframe = shm_bools[0] + p_red_finished = shm_bools[1] # not used, but for clarity + p_red_started = shm_bools[4] + + shm_framenumber_init = shared_memory.SharedMemory\ + (name="shm_framenumber") # Attach to existing shared memory block + shm_framenumber = np.ndarray((1,), dtype=np.uint64, \ + buffer= shm_framenumber_init.buf) + # framenumer = shm_framenumber[0] + + shm_redframe_init = shared_memory.SharedMemory\ + (name="shm_redframe") # Attach to existing shared memory block + shm_redframe = np.ndarray((image_heigth,image_width), dtype=np.uint8, \ + buffer= shm_redframe_init.buf) + + + i=0 + while True: + try: + framenumber = shm_framenumber[0] + if i==0: + last_processed_frame = framenumber + + conditions_for_first_start = (i==0) and\ + (shm_bools[0] == True) and \ + (shm_bools[1] == False) and (shm_bools[2] == False) and (shm_bools[3] == False) \ + and (shm_bools[7] == True) + + conditions_for_starting_processing = (framenumber>last_processed_frame) and (shm_bools[7] == True) + # newframe and all color-channel-processings have to be finished + + if conditions_for_first_start == True: + shm_bools[4] = True # process started + shm_bools[7] = False # reset trigger + + shm_bools[1] = False # set bool for p_red_finished to false + #t1 = time.perf_counter_ns() + do_processing() + i += 1 + #print(f"first processing red finished. frame: {framenumber}") + shm_bools[1] = True # set bool for p_red_finished to true + + shm_bools[4] = False # process ended + elif conditions_for_starting_processing == True: + shm_bools[4] = True # process started + shm_bools[7] = False # reset trigger + #print(f"red: framenumber: {framenumber}, last_processed_frame: {last_processed_frame}") + + shm_bools[1] = False # set bool for p_red_finished to false + #t1 = time.perf_counter_ns() + do_processing() + #print(f"max frame color red: {shm_redframe.max()}") + if show_opencv_window: + cv.imshow("red", shm_redframe) + cv.waitKey(1) + #print(f"processing red finished. 
frame: {framenumber}") + shm_bools[1] = True # set bool for p_red_finished to true + #t2 = time.perf_counter_ns() + #print(f"processing time for red channel: {round((t2-t1)*1e-6,2)} ms") + + last_processed_frame = framenumber + shm_bools[4] = False # process ended + # elif shm_bools[0] == False: + # pass + #print(f"no new red frame") + + # image processing finished + + + + except KeyboardInterrupt: + try: + shm_bool_init.close() + shm_framenumber_init.close() + shm_redframe_init.close() + + except FileNotFoundError: + # Memory already destroyed + pass + +def processing_bgr(): + shm_bool_init = shared_memory.SharedMemory(name="shm_bools") # Attach to existing shared memory block + shm_bools = np.ndarray((10,), dtype=np.bool8, buffer= shm_bool_init.buf) # do not: newframe = np.array(...)[0] --> then one can not assign new value in main script + newframe = shm_bools[0] + p_red_finished = shm_bools[1] # not used, but for clarity + p_red_started = shm_bools[4] + + shm_framenumber_init = shared_memory.SharedMemory\ + (name="shm_framenumber") # Attach to existing shared memory block + shm_framenumber = np.ndarray((1,), dtype=np.uint64, \ + buffer= shm_framenumber_init.buf) + # framenumer = shm_framenumber[0] + + shm_frame_init = shared_memory.SharedMemory\ + (name="shm_frame") # Attach to existing shared memory block + shm_redframe = np.ndarray((image_heigth,image_width, number_of_colorchannels), dtype=np.uint8, \ + buffer= shm_frame_init.buf) + image_bgr = shm_redframe + + params_for_blob_detection = define_parameters_for_blob_detection() + + i=0 + while True: + try: + framenumber = shm_framenumber[0] + if i==0: + last_processed_frame = framenumber + + conditions_for_first_start = (i==0) and\ + (shm_bools[0] == True) and \ + (shm_bools[1] == False) and (shm_bools[2] == False) and (shm_bools[3] == False) \ + and (shm_bools[7] == True) + + conditions_for_starting_processing = (framenumber>last_processed_frame) and (shm_bools[7] == True) + # newframe and all color-channel-processings have to be finished + + if conditions_for_first_start == True: + shm_bools[4] = True # process started + shm_bools[7] = False # reset trigger + + shm_bools[1] = False # set bool for p_red_finished to false + #t1 = time.perf_counter_ns() + lane_detection(image_bgr, params_for_blob_detection) + i += 1 + #print(f"first processing red finished. frame: {framenumber}") + shm_bools[1] = True # set bool for p_red_finished to true + + shm_bools[4] = False # process ended + elif conditions_for_starting_processing == True: + shm_bools[4] = True # process started + shm_bools[7] = False # reset trigger + #print(f"red: framenumber: {framenumber}, last_processed_frame: {last_processed_frame}") + + shm_bools[1] = False # set bool for p_red_finished to false + #t1 = time.perf_counter_ns() + start_processing = time.perf_counter_ns() + lane_detection(image_bgr, params_for_blob_detection) + end_processing = time.perf_counter_ns() + time_processing = end_processing-start_processing + time_processing = time_processing*1e-6 + time_processing = round(time_processing, 2) + print(f'processing time: {time_processing} ms') + #print(f"max frame color red: {shm_redframe.max()}") + if show_opencv_window: + cv.imshow("red", shm_redframe) + cv.waitKey(1) + #print(f"processing red finished. 
frame: {framenumber}") + shm_bools[1] = True # set bool for p_red_finished to true + #t2 = time.perf_counter_ns() + #print(f"processing time for red channel: {round((t2-t1)*1e-6,2)} ms") + + last_processed_frame = framenumber + shm_bools[4] = False # process ended + # elif shm_bools[0] == False: + # pass + #print(f"no new red frame") + + # image processing finished + + + + except KeyboardInterrupt: + try: + shm_bool_init.close() + shm_framenumber_init.close() + shm_redframe_init.close() + + except FileNotFoundError: + # Memory already destroyed + pass + +# ---------------------------------------------------------------------------- +# main +def main(): + start = time.perf_counter() + + try: + # create processes + p_red = Process(target=processing_bgr) + processes = [p_red] + + print(f"waiting 1 second to create processes") + time.sleep(1) # sind prozesse schon vorhanden + + # start acitivity of processes + for process in processes: + process.start() + + # start capturing + get_frames_from_picamera() + + + print('*******************************') + # this below code is only executed if the loop in take_image_picamera_opencv is breaked + # In real use case there will be no end of this program + + for process in processes: + process.terminate() + + # print time measuring + end = time.perf_counter() + print(f'Script finished in {round(end-start, 2)} s') + + # close each SharedMemory instance and unlink to release the shared memory + shm_bools_create.close() + shm_bools_create.unlink() + shm_framenumber_create.close() + shm_framenumber_create.unlink() + shm_redframe_create.close() + shm_redframe_create.unlink() + shm_greenframe_create.close() + shm_greenframe_create.unlink() + shm_blueframe_create.close() + shm_blueframe_create.unlink() + + except KeyboardInterrupt: + # Normally this prgoram never gets keyboard interrupted! 
But here this case is nevertheless handled + # End Script + try: + # close each SharedMemory instance and unlink to release the shared memory + shm_bools.close() + shm_bools.unlink() + shm_framenumber_create.close() + shm_framenumber_create.unlink() + shm_redframe_create.close() + shm_redframe_create.unlink() + shm_greenframe_create.close() + shm_greenframe_create.unlink() + shm_blueframe_create.close() + shm_blueframe_create.unlink() + + except FileNotFoundError: + # Memory already destroyed + pass + + +if __name__ == "__main__": + main() diff --git a/90_ZielSW/Multiprocessing/Lanedetection_Picamera_V02.py b/90_ZielSW/Multiprocessing/Lanedetection_Picamera_V02.py new file mode 100644 index 0000000..eb14c95 --- /dev/null +++ b/90_ZielSW/Multiprocessing/Lanedetection_Picamera_V02.py @@ -0,0 +1,905 @@ + +import cv2 as cv +import numpy as np + +import picamera +from picamera.array import PiRGBArray +from fractions import Fraction + +import time +from datetime import datetime +import os +import sys + +import numpy as np +import math as M + +from multiprocessing import Process, shared_memory + +# Parameters +pixels_per_mm = 71/24.25 #[px/mm] for 120 mm camera height for resolution: 416x320 +# pixels_per_mm = 107/24.25 #[px/mm] for 120 mm camera height for resolution: 640x480 + +# Offset Camera Sensor in Scooty according to Scooty-KS +x_offset_camera_mm = 100 # [mm] +y_offset_camera_mm = 50 # [mm] +x_offset_camera_px = x_offset_camera_mm*pixels_per_mm # [px] +y_offset_camera_px = y_offset_camera_mm*pixels_per_mm # [px] + +# image parameters +image_heigth = 320 # shape [0] +image_width = 416 # shape[1] +# calculate center of image +[x_0, y_0] = np.array([image_width/2, image_heigth/2], dtype=np.uint16) + + +threshold_color_detection = 60 # values under this will not be considered as active leds in each color channel + +# Parameters for Blob/LED Detection +minDiameter_mm = 5 # [mm] minimum diameter of detected blob/LED +maxDiameter_mm = 9 # [mm] maximum diameter of detected blob/LED + +# Define color numbers to identify the color channels in the matrix with all detected LEDs. No string, because numpy array should stay uint16 +color_number_off = 0 +color_number_red = 1 +color_number_green = 2 +color_number_blue = 3 +color_number_yellow = 4 +color_number_magenta = 5 +color_number_cyan = 6 +color_number_white = 7 + +#---------------------------------------------------------------------- +# Define camera settings +SENSOR_MODE = 4 # corresponding sensor mode to resolution +OUTPUT_RESOLUTION = (320, 416) # (width, heigth) +image_width = OUTPUT_RESOLUTION[0] +image_heigth = OUTPUT_RESOLUTION[1] +number_of_colorchannels = 3 # r, g, b +size_of_frame=int(image_heigth*image_heigth*number_of_colorchannels) +frame_dimension = int(number_of_colorchannels) + +AWB_MODE = 'off' # Auto white balance mode +AWB_GAINS = (1.395, 1.15) # White Balance Gains to have colours read correctly: (red, blue). Int, floar or fraction are valid. +BRIGHTNESS = 25 # sets the brightness setting of the camera. default is 50. [0-100] + #the brighter, the brighter the LEDs and the higher the RGB values and vice versa! +CONTRAST = 100 # sets the contrast setting of the camera. The default value is 0. [-100 ... 100] + +SHUTTER_SPEED = 50 # [µs] + +ISO = 320 # ISO value +EXPOSURE_MODE = 'off' +FRAMERATE = 25 # frames per second. 40 fps is max for sensor mode 4 + +SLEEP_TIME = 2 # Time for sleep-mode for the camera in seconds. 
My default: 2 s + +# miscellaneous parameters +max_value_of_uint64 = int((2**64) - 1) # @30 fps: konservative calculated driving time: 1.95*1e10 years --> Integer overflow not relevant. + +# settings for development +show_opencv_window = True # show opencv window +draw_opencv = True # draw lane and so on + +# create shared Memorys for main-process +# shared memory for bools +shm_bools_pre=np.array([False, False, False, False, False, False, False, False, False, False], dtype=np.bool8) # create numpy array with bools stored in it + # [0]: newframe [1]: p_red_finished [2]: p_green_finished [3]: p_blue_finished + # [4]: p_red_started [5]: p_green_started [6]: p_blue_started + # [7]: p_red_start_trigger [8]: p_green_start_trigger [9]: p_blue_start_trigger +size_of_buffer = shm_bools_pre.nbytes +print(f"size of buffer: {size_of_buffer}") # size of buffer: 10 +print(f"shm_bools dtype: {shm_bools_pre.dtype}") # dtype: bool +shm_bools_create = shared_memory.SharedMemory(name="shm_bools", create=True, size=shm_bools_pre.nbytes) # create a new shared memory block +shm_bools = np.ndarray(shm_bools_pre.shape, dtype=shm_bools_pre.dtype, buffer=shm_bools_create.buf) # create a NumPy array backed by shared memory +shm_bools[:] = shm_bools_pre[:] # Copy the original data into shared memory + +# print(shm_bool) +# print(shm_bools.name) + +# shared memory for framenumber +shm_framenumber_pre=np.array([0], dtype=np.uint64) +size_of_buffer = shm_framenumber_pre.nbytes +print(f"size of framenumber-buffer: {size_of_buffer}") #8 +print(f"shm_framenumber dtype: {shm_framenumber_pre.dtype}") #uint64 +shm_framenumber_create = shared_memory.SharedMemory(name="shm_framenumber", create=True, size=shm_framenumber_pre.nbytes) # create a new shared memory block +shm_framenumber = np.ndarray(shm_framenumber_pre.shape, dtype=shm_framenumber_pre.dtype, buffer=shm_framenumber_create.buf) # create a NumPy array backed by shared memory +shm_framenumber[:] = shm_framenumber_pre[:] # Copy the original data into shared memory +# print(shm_framenumber) # [0] +# print(shm_framenumber_create.name) # shm_framenumber + +# shared memory for red, green, blue frame +int_black = 0 # integer for black color/ no color +shm_colorframes_pre = np.full(\ + (image_heigth,image_width), \ + int_black, dtype=np.uint8) +size_of_buffer = shm_colorframes_pre.nbytes +print(f"size of colorframe-buffer: {size_of_buffer}") #133 120 +print(f"shm_colorframes_pre dtype: {shm_colorframes_pre.dtype}") #uint8 +shm_redframe_create = shared_memory.SharedMemory(name="shm_redframe", create=True, size=shm_colorframes_pre.nbytes) # create a new shared memory block +shm_greenframe_create = shared_memory.SharedMemory(name="shm_greenframe", create=True, size=shm_colorframes_pre.nbytes) # create a new shared memory block +shm_blueframe_create = shared_memory.SharedMemory(name="shm_blueframe", create=True, size=shm_colorframes_pre.nbytes) # create a new shared memory block +shm_redframe = np.ndarray(shm_colorframes_pre.shape, dtype=shm_colorframes_pre.dtype, buffer=shm_redframe_create.buf) # create a NumPy array backed by shared memory +shm_greenframe = np.ndarray(shm_colorframes_pre.shape, dtype=shm_colorframes_pre.dtype, buffer=shm_greenframe_create.buf) # create a NumPy array backed by shared memory +shm_blueframe = np.ndarray(shm_colorframes_pre.shape, dtype=shm_colorframes_pre.dtype, buffer=shm_blueframe_create.buf) # create a NumPy array backed by shared memory +shm_redframe[:] = shm_colorframes_pre[:] # Copy the original data into shared memory +shm_greenframe[:] = 
shm_colorframes_pre[:] # Copy the original data into shared memory +shm_blueframe[:] = shm_colorframes_pre[:] # Copy the original data into shared memory +# shared memory bgr frame +int_black = 0 # integer for black color/ no color +shm_frame_pre = np.full(\ + (image_heigth,image_width, number_of_colorchannels), \ + int_black, dtype=np.uint8) +size_of_buffer = shm_frame_pre.nbytes +print(f"size of colorframe-buffer: {size_of_buffer}") #399 360 = 416 x 320 x 3 +print(f"shm_colorframes_pre dtype: {shm_frame_pre.dtype}") #uint8 +shm_frame_create = shared_memory.SharedMemory(name="shm_frame", create=True, size=shm_frame_pre.nbytes) # create a new shared memory block +shm_frame = np.ndarray(shm_frame_pre.shape, dtype=shm_frame_pre.dtype, buffer=shm_frame_create.buf) # create a NumPy array backed by shared memory +shm_frame[:] = shm_frame_pre[:] # Copy the original data into shared memory + + +# ---------------------------------------------------------------------------- +# Define Functions +def points_trafo(detected_LEDs, alpha_rad, dx, dy): + """Tranfsform points of LED to lane in KS-LED""" + detected_LEDs_trafo = detected_LEDs.copy() # copy, becuase else it is only a pointer + detected_LEDs_trafo = detected_LEDs_trafo.astype(np.int16) # avoid integer overflow + x_pnts = detected_LEDs_trafo[:,0] + y_pnts = detected_LEDs_trafo[:,1] + + # Translation + x1 = x_pnts-dx-x_0 + x_trafo=x1 + y1 = y_pnts-dy-y_0 + y_trafo = y1 + + # Rotation. Winkel Sensor im UZS, also negativ zu mathematischer definiton + x_trafo = np.cos(-alpha_rad)*x1-np.sin(-alpha_rad)*y1 + detected_LEDs_trafo[:,0] = x_trafo + y_trafo = np.sin(-alpha_rad)*x1+np.cos(-alpha_rad)*y1 + detected_LEDs_trafo[:,1] = y_trafo + + #sort points along lane: x_2, y_2 -axis (KS_LED) + detected_LEDs_trafo = detected_LEDs_trafo[detected_LEDs_trafo[:, 0].argsort(kind='quicksort')] + return detected_LEDs_trafo + +def construct_lane(detected_LEDs, img_bgr): + """construct the lane""" + # This function is partially commented in german, because higher math is used + # clearer what is trying to be achieved + + # get points + # xy_pnts = detected_LEDs[:,0:2] + # x_pnts = detected_LEDs[:,0] + # y_pnts = detected_LEDs[:,1] + + # approach 2: + # fit line through centers of LEDs in KS_0 + # DIST_L": the simplest and the fastest least-squares method: the simple euclidean distance + param = 0 # not used for DIST_L2 + reps = 0.001 # Sufficient accuracy for the radius (distance between the coordinate origin and the line). + aeps = 0.001 # Sufficient accuracy for the angle. 
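+    # Brief note on the call below (illustrative sketch, values are examples only):
+    # cv.fitLine with cv.DIST_L2 returns a 4x1 float array (vx, vy, x0, y0), where
+    # (vx, vy) is a unit direction vector of the fitted line (up to sign) and
+    # (x0, y0) is a point on it:
+    #   pts = np.array([[0, 0], [1, 1], [2, 2]], dtype=np.int32)
+    #   vx, vy, x0, y0 = cv.fitLine(pts, cv.DIST_L2, 0, 0.001, 0.001)
+    #   # vx = vy ~ 0.7071, (x0, y0) ~ (1, 1) --> alpha = 45 deg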
+ [dx, dy, x_2, y_2] = cv.fitLine(detected_LEDs[:,0:2], cv.DIST_L2, param, reps, aeps) + # x2, y2: same as: mean_of_leds = np.mean([x_pnts, y_pnts], 1) + + alpha_rad = np.arctan2(dy, dx) # calculate angle of line + alpha = np.arctan2(dy, dx)*180/np.pi # calculate angle of line + # print(f"Lane: dx: {dx}, dy:{dy}, x2:{x_2}, y2:{y_2}, alpha:{alpha}°") + print(f"Lane: alpha:{alpha[0]}°") + + # get smallest distance to point an line + # Berechnung nach: Repetitorium Höhere Mathematik, Wirth + # Gerade: x = a+ t*b + # Punkt : OP = p + # d = abs(b x (p-a))/(abs(b)) + # info: np.array()[:,0] --> gets only array with 1 dimensions with desired values + p = np.array([x_0, y_0]) + a = np.array([x_2, y_2])[:,0] + b = np.array([np.cos(alpha_rad), np.sin(alpha_rad)])[:,0] # Richtungsvektor + c = p-a + + # Betrag von Vektor: np.linalg.norm(vec) + cross= np.cross(b, c) + d = np.linalg.norm(cross)/np.linalg.norm(b) # distance [px] + #print(f"d: {round(d,2)}") + + # Fußpunkt (X_LED, Y_LED) + t_0_dot = np.dot(c, b) + t_0_norm = (np.linalg.norm(b)**2) + t_0 = t_0_dot/t_0_norm + [x_LED, y_LED] = (a+t_0*b) + print(f"x_LED: {x_LED}, y_LED: {y_LED}") + + # Abstand (dx, dy) Fußpunkt zu KS_0 + dx_LED = x_LED - x_0 + dx_LED_mm = dx_LED*(1/pixels_per_mm) + dy_LED = y_LED - y_0 + dy_LED_mm = dy_LED*(1/pixels_per_mm) + print(f"dx_LED:{dx_LED} [px] , dy_LED:{dy_LED} [px]") + print(f"dx_LED:{dx_LED_mm} [mm] , dy_LED:{dy_LED_mm} [mm]") + + # Abstand (dx, dy) Fußpunkt von Bildmitte zu KS_Scooty + # Diese Werte zurückgeben + dx_LED_scooty = x_LED - x_0 + x_offset_camera_px + dx_LED_scooty_mm = dx_LED_scooty*(1/pixels_per_mm) + dy_LED_scooty = y_LED - y_0 + y_offset_camera_px + dy_LED_scooty_mm = dy_LED_scooty*(1/pixels_per_mm) + print(f"dx_LED_scooty:{dx_LED_scooty} [px] , dy_LED_scooty:{dy_LED_scooty} [px]") + print(f"dx_LED_scooty:{dx_LED_scooty_mm} [mm] , dy_LED_scooty:{dy_LED_scooty_mm} [mm]") + + # Punkte Trafo, um sortierte position der LEDs entlang Spur zu erhalten + # Bei normal detected kann bei vertikaler LED zb Fehler entstehen und dann muster: 211323233 -> daher mit dieser sortierten weitermachen + detected_LEDs_KS_LED = points_trafo(detected_LEDs, alpha_rad, dx_LED, dy_LED) + print(f"Detected LEDs in KS_LED:(x2, y2):\n {detected_LEDs_KS_LED}") + + #----------------------------------- + # draw useful lines and points + + # draw lane line + pt_0 = (a+b*np.array([-300, -300])).astype(np.int32) + pt_1 = (a+b*np.array([300, 300])).astype(np.int32) + #print(f"pt_0: {pt_0}, pt_1: {pt_1}") + cv.line(img_bgr, pt_0, pt_1, (255,255,255),1) # draw lane + + # draw dx dy + cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_LED)), (0,0,255), 2) # shortest distance from KS_0 to KS_LED --> Lot + # cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_0)), (0,0,255), 2) # only dx + # cv.line(img_bgr, (int(x_LED), int(y_0)), (int(x_LED), int(y_LED)), (0,0,255), 2) # only dy + + #draw additional points + cv.circle(img_bgr, (int(x_2), int(y_2)), 5,(255,128,255),-1) #pink. Center of points + #cv.putText(img_bgr, '(x2, y2)',(int(x_2)+5, int(y_2)-5), cv.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), cv.LINE_AA) + cv.circle(img_bgr, (int(x_LED), int(y_LED)), 5,(170,255,0),-1) # lime green. Fußpunkt + + cv.imshow("Lane", img_bgr) + + return dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED + +def convert_rgb_to_grayscale_average(image_bgr): + """This function converts the RGB image into an grayscale image. 
+ Algorithm: Average: Y = (R+G+B)/3""" + + # convert dtype to prevent integer overflow while addition + image_bgr = image_bgr.astype(np.uint16) + image_gray = (image_bgr[:,:,0]+image_bgr[:,:,1]+image_bgr[:,:,2])/3 # add values / do conversion + image_gray = image_gray.astype(np.uint8) # convert back to uint8 + + return image_gray + +def define_parameters_for_blob_detection(): + """set parameters for simple blob detector""" + params = cv.SimpleBlobDetector_Params() + + # Threshold for Convert the source image to binary images by applying thresholding + # with several thresholds from minThreshold (inclusive) to maxThreshold (exclusive) + # with distance thresholdStep between neighboring thresholds. + # Since the Grayscale image is dark if only one color channel is active, + # the Threshold values have to be set like this. + # particularly the thresholdStep-Value has to be low + params.minThreshold=20 # reminder: this value is set for grayscale image + params.maxThreshold=255 + params.thresholdStep=1 + + params.filterByColor=False # do not filter blobs by color + + # Filter blobs by Area + params.filterByArea=True + minDiameter_px = minDiameter_mm*pixels_per_mm # [px] minimum diameter of detected blob/LED + maxDiameter_px = maxDiameter_mm*pixels_per_mm # [px] maximum diameter of detected blob/LED + minArea_px2 = np.pi/4*minDiameter_px**2 + maxArea_px2 = np.pi/4*maxDiameter_px**2 + + params.minArea = minArea_px2 # min Area of a blob in px^2 + # params.maxArea = maxArea_px2 # max Area of a blob in px^2. + # reasons for not filtering maxArea: motion blur + rolling shutter --> larger Area + + # Filter by Inertia + params.filterByInertia=False + params.minInertiaRatio = 0.2 # [0-1] + + # Filter by Convexity + params.filterByConvexity=False + params.minConvexity = 0.2 # [0-1] + + # Filter by Circularity + params.filterByCircularity=False + params.minCircularity = 0.4 # [0-1] + + # params.minDistBetweenBlobs = minDist_px # this has no effect + + return params + +def detect_LED_positions_in_grayscale(image_gray, image_bgr, params_for_blob_detection): + + detector = cv.SimpleBlobDetector_create(params_for_blob_detection) # Set up the detector with specified parameters. 
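+    # Note: the detector is rebuilt here on every call, i.e. for every frame.
+    # A cheaper pattern (introduced via create_detector() in
+    # Lanedetection_Picamera_V03.py later in this commit) is to construct it
+    # once at startup and reuse it:
+    #   detector = cv.SimpleBlobDetector_create(params_for_blob_detection)  # once, at startup
+    #   keypoints = detector.detect(image_gray)                             # per frame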
+ keypoints = detector.detect(image_gray) # Detect blobs --> LEDs + number_of_detected_leds = len(keypoints) + + if number_of_detected_leds != 0: + # print information of keypoints + print(f"detected LEDs: {number_of_detected_leds}") + + #Pre-allocate matrix for numpy + number_of_rows = number_of_detected_leds + number_of_columns = 3 + position_of_leds = np.zeros((number_of_rows, number_of_columns), dtype=np.uint16) + for i, k in enumerate(keypoints): + # x_pos = round(k.pt[0],0) # x position + # y_pos = round(k.pt[1],0) # y position + # print(f"x: {x_pos} y: {y_pos}") + # diameter_px = round(k.size,2) + # diameter_mm = round(diameter_px*1/pixels_per_mm,2) + # print(f"diameter [px]: {diameter_px} diameter [mm]: {diameter_mm}") # diameter + # area_px2 = round(np.pi/4*k.size**2,0) # area in px^2 + # area_mm2 = round(area_px2*(1/pixels_per_mm)**2,0) + # print(f"area [px^2]: {area_px2} area [mm^2]: {area_mm2}") + # print('') + + # calculate parameters to transfer to matrix + # x_pos = int(np.ceil(x_pos)) + # y_pos = int(np.ceil(y_pos)) + # Fill matrix + # position_of_leds[i,:] = [x_pos,y_pos, 0] + position_of_leds[i,0] = int(np.ceil(k.pt[0])) # x positon + position_of_leds[i,1] = int(np.ceil(k.pt[1])) # y position + + + if draw_opencv: + # draw the keypoints on the original image + # cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob + blobs = cv.drawKeypoints(image=image_bgr, keypoints=keypoints, color=(255, 255, 255), \ + outImage=np.array([]), flags= cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) + + if show_opencv_window: + # cv.imshow("grayscale", image_gray) + cv.imshow("Detected", blobs) + + return position_of_leds + + else: + print(f"No LEDs were detected") + return None + +def detect_position_of_all_LEDs_grayscale(image_gray, image_bgr, params_for_blob_detection): + position_of_LEDs = detect_LED_positions_in_grayscale(image_gray, image_bgr, params_for_blob_detection) + + if position_of_LEDs is not None: + return position_of_LEDs + else: + return None + +def get_color_of_leds(matrix_of_LEDs, image_bgr): + # is image_r[y_pos, x_pos] = image_bgr[y_pos,x_pos, 2] ? --> yes. No need to split the color channels. 
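+    # A quick check of the equivalence claimed above (illustrative sketch):
+    #   b, g, r = cv.split(image_bgr)
+    #   np.array_equal(r, image_bgr[:, :, 2])  # --> True: channel index 2 is red in BGR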
+ + offset = 2 # half of length from rectangle which is going to be used to determine the color around the middle point of the blob/led + # offset = 0 --> only the value from the middle point of the blob/led + # offset=1 --> 9 values, offset=2-->25 values + + for led in matrix_of_LEDs: + x_pos = led[0] # uint16 + y_pos = led[1] # uint16 + + # get values of color channels in region around middle point of blob/led: + # +1 at stop index, because it is not inclusive + region_around_blue_led = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, 0] # uint8 + region_around_green_led = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, 1] # uint8 + region_around_red_led = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, 2] # uint8 + + # average of the values + # convert dtype to prevent integer overflow while addition + region_around_red_led = region_around_red_led.astype(np.uint16) + region_around_green_led = region_around_green_led.astype(np.uint16) + region_around_blue_led = region_around_blue_led .astype(np.uint16) + # sum all elements in matrix and divide with number of elements + number_of_elements= region_around_blue_led.size + value_of_red_led = region_around_red_led.sum()/number_of_elements # float64, if not integer result + value_of_green_led = region_around_green_led.sum()/number_of_elements # float64, if not integer result + value_of_blue_led = region_around_blue_led.sum()/number_of_elements # float64, if not integer result + + # determine which leds are active: + # if value > threshold --> led is active + status_blue_led = False; status_green_led = False; status_red_led = False + if value_of_blue_led > threshold_color_detection: + status_blue_led = True + if value_of_green_led > threshold_color_detection: + status_green_led = True + if value_of_red_led > threshold_color_detection: + status_red_led = True + + # determine color by checking the cases: + # case 1: red + if status_blue_led==False and status_green_led==False and status_red_led==True: + color = color_number_red + # case 2: green + elif status_blue_led==False and status_green_led==True and status_red_led==False: + color = color_number_green + # case 3: blue + elif status_blue_led==True and status_green_led==False and status_red_led==False: + color = color_number_blue + # case 4: yellow = red + green + elif status_blue_led==False and status_green_led==True and status_red_led==True: + color = color_number_yellow + # case 5: magenta = red + blue + elif status_blue_led==True and status_green_led==False and status_red_led==True: + color = color_number_magenta + # case 6: cyan = green + blue + elif status_blue_led==True and status_green_led==True and status_red_led==False: + color = color_number_cyan + # case 7: white = red + green + blue + elif status_blue_led==True and status_green_led==True and status_red_led==True: + color = color_number_white + # case 8: led not active + # this case can not occur, because no inactive led can be detected from the implemented blob-algorithm in detect_LED_positions_in_grayscale + else: + color = color_number_off + + # fill matrix with color + led[2] = color # uint16 + + return matrix_of_LEDs + + +def detect_LEDs_with_grayscale(image_bgr, params_for_blob_detection): + # convert rgb to grayscale image + start_m1 = time.perf_counter_ns() + image_gray = convert_rgb_to_grayscale_average(image_bgr) + end_m1= time.perf_counter_ns() + time_processing = end_m1-start_m1 + time_processing = time_processing*1e-6 + time_processing = round(time_processing, 2) 
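+    # The three conversion steps above are equivalent to this one-liner (ns -> ms, 2 decimals):
+    #   time_processing = round((end_m1 - start_m1) * 1e-6, 2)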
+ print(f'processing time grayscale_conversion: {time_processing} ms') + + + # get position of leds + position_of_LEDs = detect_position_of_all_LEDs_grayscale(image_gray=image_gray, image_bgr=image_bgr, params_for_blob_detection=params_for_blob_detection) + + if position_of_LEDs is not None: + # determine color of leds and add to matrix + detected_LEDs = get_color_of_leds(position_of_LEDs, image_bgr) + return detected_LEDs + else: + return None + + +def lane_detection(image_bgr, params_for_blob_detection): + + # Detect LEDs + print(f"Detect LEDs and color:") + detected_LEDs = detect_LEDs_with_grayscale(image_bgr, params_for_blob_detection) + + + + if detected_LEDs is not None: + # Contruct lane + print(f"_____________________________________") + print("Contruct lane") + dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED = \ + construct_lane(detected_LEDs, image_bgr) + + # print result + print(f"Detected LEDs relative to image-center(x0,y0):\n{detected_LEDs}") + return detected_LEDs + else: + return None + + + +# ------------------- +# Define Funcions +def get_frames_from_picamera(): + # newframe= shm_bools[0] # do not use this! no updted values in "newframe" + # framenumber = shm_framenumber[0] + + # Initialise Camera + with picamera.PiCamera() as camera: + with PiRGBArray(camera) as output: + # Set camera settings + camera.sensor_mode = SENSOR_MODE # force camera into desired sensor mode + camera.resolution = OUTPUT_RESOLUTION # frame will be resized from GPU to this resolution. No CPU usage! + camera.framerate = FRAMERATE + + camera.awb_mode = AWB_MODE + camera.awb_gains = AWB_GAINS + + camera.iso = ISO + camera.shutter_speed = SHUTTER_SPEED + + time.sleep(SLEEP_TIME) # wait for iso gains and digital_gain and analog_gain to settle before fixing the gains with exposure_mode = off + camera.exposure_mode = EXPOSURE_MODE + + time.sleep(1) # wait before applying brightness and contrast + camera.brightness = BRIGHTNESS + camera.contrast = CONTRAST + time.sleep(SLEEP_TIME) # Camera warm-up time to apply settings + + t_start= time.perf_counter() # save time for fps calculation + + for frameidx, frame in enumerate(camera.capture_continuous(output, format='bgr', use_video_port=True)): + # General information: + # - always the newest frame is recieved: processing must be faster than fps if every frame should be processed + + shm_bools[0] = True + framenumber = frameidx+1 # frameidx starts with 0, framenumber with 1 + + + shm_framenumber[0] = framenumber + #print('') + #print(f"new frame: {framenumber}") + + #image = frame.array # raw NumPy array without JPEG encoding + + #b,g,r = cv.split(frame.array) # split colour channels of raw NumPy array without JPEG encoding + shm_frame[:] = frame.array + #shm_redframe[:] = r + #shm_greenframe[:] = g + #shm_blueframe[:] = b + # for better performance one can assign directly in funtion line the values to the shm_memorys: shm_red, .. , ... = cv.split(..) 
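+                # A possible one-liner for the suggestion above (untested sketch):
+                # write the split channels straight into the shared arrays without
+                # intermediate names:
+                #   shm_blueframe[:], shm_greenframe[:], shm_redframe[:] = cv.split(frame.array)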
+
+                shm_bools[7:10] = [True] # trigger the start of the processing for each colorchannel
+                #print(shm_bools[7], shm_bools[8], shm_bools[9])
+
+                #display_image_with_text(image, shutter_speed, framenumber, camera_exposure_speed, trigger_record_OpenCV, out) # show the frame
+
+                output.truncate(0) # clear the stream for next frame
+
+                if framenumber == 500: # 500 frames = 20 s @ 25 fps, only for performance measuring
+                    t_stop = time.perf_counter()
+                    print(f"calculated fps: {framenumber/(t_stop-t_start)}")
+                    break
+
+
+def display_image_with_text(img, shutter_speed, framenumber, camera_exposure_speed, trigger_record_OpenCV, out):
+    img = img.copy() # make copy of image and do not modify the original image
+
+    # activate at most one trigger at a time
+    trigger_show_brightness = 0 # trigger for (not) calculating and showing the brightness of the image
+    if trigger_show_brightness == 1:
+        arithmetic_mean_of_brightness_per_pixel_relative = calc_arithmetic_mean_of_brightness_per_pixel(img)
+
+    trigger_show_max_brightness_values_of_colour_channels = 0 # trigger for (not) calculating and showing max values of colour channels
+    if trigger_show_max_brightness_values_of_colour_channels == 1:
+        r_max, g_max, b_max = get_max_rgb_values(img)
+
+    font = cv.FONT_HERSHEY_SIMPLEX # font
+    fontScale = 1 # fontScale
+    color = (255, 255, 255) # Font colour in BGR
+    thickness = 1 # Line thickness in px
+
+    # set text position
+    frame_width = int(img.shape[1])
+    frame_height = int(img.shape[0])
+    text_start_position_Y = int(round(frame_height*0.12)) # start position of text in pixels: 12 % of frame height
+    text_linespacing = 50 # line spacing between two strings in pixels
+    # text_start_position_X = int(frame_width/4) # start text from 1/4 of image width
+    text_start_position_X = int(0) # start text from left edge of image
+
+    # set position in (x,y)-coordinates from top left corner. Bottom-left corner of the text string in the image. 
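+    # The four anchor points below could also be generated from the line index
+    # (equivalent sketch, assuming all four text lines are shown):
+    #   pos_1, pos_2, pos_3, pos_4 = [(text_start_position_X, text_start_position_Y + k*text_linespacing)
+    #                                 for k in range(4)]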
+ pos_1 = (text_start_position_X, text_start_position_Y) # start text from 1/4 of image width + pos_2 = (text_start_position_X, text_start_position_Y+text_linespacing) # start text from 1/4 of image width + pos_3 = (text_start_position_X, text_start_position_Y+2*text_linespacing) # start text from 1/4 of image width + if trigger_show_brightness==1 or trigger_show_max_brightness_values_of_colour_channels==1: + pos_4 = (text_start_position_X, text_start_position_Y+3*text_linespacing) # start text from 1/4 of image width + + + # define text to display + text_line_1 = f"set ss: {shutter_speed} us" + text_line_3 = f"Frame: {framenumber}" + text_line_2 = f"ret exs: {camera_exposure_speed} us" + if trigger_show_brightness==1: + if arithmetic_mean_of_brightness_per_pixel_relative >= 0.01: + text_line_4 = f"brightness: {round(arithmetic_mean_of_brightness_per_pixel_relative*100,2)} %" + elif arithmetic_mean_of_brightness_per_pixel_relative < 0.01: + text_line_4 = f"brightness: {round(arithmetic_mean_of_brightness_per_pixel_relative*10e3,2)} pm" + if trigger_show_max_brightness_values_of_colour_channels==1: + text_line_4 = f"max: r:{r_max} g:{g_max} b:{b_max}" + + + # put the text into the image + #image_text_1 = cv.putText(img, text_line_1, pos_1, font, + # fontScale, color, thickness, cv.LINE_AA) + #image_text_2 = cv.putText(img, text_line_2, pos_2, font, + # fontScale, color, thickness, cv.LINE_AA) + #image_text_3 = cv.putText(img, text_line_3, pos_3, font, + # fontScale, color, thickness, cv.LINE_AA) + if trigger_show_brightness==1 or trigger_show_max_brightness_values_of_colour_channels==1: + image_text_4 = cv.putText(img, text_line_4, pos_4, font, + fontScale, color, thickness, cv.LINE_AA) + + + cv.imshow("Current Frame", img) # display the image + if trigger_record_OpenCV == 1: + out.write(img) # write frame to Video + + +def calc_arithmetic_mean_of_brightness_per_pixel(image): + """Calculate overall brightness per pixel of the image. Mittelere Helligkeit pro pixel berechnen.""" + #Comment: So rechenintensiv, dass man kein Blitzen sieht im Bild. (Oder sehr selten bzw. schwach). Daher anzeige von max-werten + + b,g,r = cv.split(image) # OpenCV works with bgr. Image is also speciefied to be captured in bgr. + r=r.astype('uint16') # set dtype of one colour to uint16, because the sum of 255+255+255 >255 =765 + #the rest will also be uint16 then + image_heigth = r.shape[0] + image_width = r.shape[1] + + number_of_colour_channels = 3 + arithmetic_mean_of_brightness_image = np.sum((r+g+b)/number_of_colour_channels) + arithmetic_mean_of_brightness_per_pixel = arithmetic_mean_of_brightness_image/(image_width*image_heigth) + + max_possible_brightness = 255 # maximum possible brightness + arithmetic_mean_of_brightness_per_pixel_relative = arithmetic_mean_of_brightness_per_pixel/max_possible_brightness + + return arithmetic_mean_of_brightness_per_pixel_relative + +def get_max_rgb_values(image): + """get max values of colour channels""" + b,g,r = cv.split(image) # OpenCV works with bgr. Image is also speciefied to be captured in bgr. + + r_max=r.max() + g_max=g.max() + b_max=b.max() + + return r_max, g_max, b_max + +def create_folder_for_captures(): + # Create folder for saving the captured pictures + now = datetime.now(); d1 = now.strftime("%Y-%m-%d %H-%M") + path_cwd = os.getcwd() + + path_saveFolder = path_cwd+r"/Capture_"+d1 + try: + os.mkdir(path_saveFolder) + folder_exists = True + except OSError: + print("Error! 
Ending script.") + quit() + + return path_saveFolder, folder_exists + +def do_processing(): + time.sleep(0.001) + #print("ohh i am doing high complex image analysis with computer vision") + +def do_processing_frame_r(frame): + print(f"max frame color red: {frame.max()}") +def do_processing_frame_g(frame): + print(f"max frame color green: {frame.max()}") +def do_processing_frame_b(frame): + print(f"max frame color blue: {frame.max()}") + + + +def processing_red(): + shm_bool_init = shared_memory.SharedMemory(name="shm_bools") # Attach to existing shared memory block + shm_bools = np.ndarray((10,), dtype=np.bool8, buffer= shm_bool_init.buf) # do not: newframe = np.array(...)[0] --> then one can not assign new value in main script + newframe = shm_bools[0] + p_red_finished = shm_bools[1] # not used, but for clarity + p_red_started = shm_bools[4] + + shm_framenumber_init = shared_memory.SharedMemory\ + (name="shm_framenumber") # Attach to existing shared memory block + shm_framenumber = np.ndarray((1,), dtype=np.uint64, \ + buffer= shm_framenumber_init.buf) + # framenumer = shm_framenumber[0] + + shm_redframe_init = shared_memory.SharedMemory\ + (name="shm_redframe") # Attach to existing shared memory block + shm_redframe = np.ndarray((image_heigth,image_width), dtype=np.uint8, \ + buffer= shm_redframe_init.buf) + + + i=0 + while True: + try: + framenumber = shm_framenumber[0] + if i==0: + last_processed_frame = framenumber + + conditions_for_first_start = (i==0) and\ + (shm_bools[0] == True) and \ + (shm_bools[1] == False) and (shm_bools[2] == False) and (shm_bools[3] == False) \ + and (shm_bools[7] == True) + + conditions_for_starting_processing = (framenumber>last_processed_frame) and (shm_bools[7] == True) + # newframe and all color-channel-processings have to be finished + + if conditions_for_first_start == True: + shm_bools[4] = True # process started + shm_bools[7] = False # reset trigger + + shm_bools[1] = False # set bool for p_red_finished to false + #t1 = time.perf_counter_ns() + do_processing() + i += 1 + #print(f"first processing red finished. frame: {framenumber}") + shm_bools[1] = True # set bool for p_red_finished to true + + shm_bools[4] = False # process ended + elif conditions_for_starting_processing == True: + shm_bools[4] = True # process started + shm_bools[7] = False # reset trigger + #print(f"red: framenumber: {framenumber}, last_processed_frame: {last_processed_frame}") + + shm_bools[1] = False # set bool for p_red_finished to false + #t1 = time.perf_counter_ns() + do_processing() + #print(f"max frame color red: {shm_redframe.max()}") + if show_opencv_window: + cv.imshow("red", shm_redframe) + cv.waitKey(1) + #print(f"processing red finished. 
frame: {framenumber}") + shm_bools[1] = True # set bool for p_red_finished to true + #t2 = time.perf_counter_ns() + #print(f"processing time for red channel: {round((t2-t1)*1e-6,2)} ms") + + last_processed_frame = framenumber + shm_bools[4] = False # process ended + # elif shm_bools[0] == False: + # pass + #print(f"no new red frame") + + # image processing finished + + + + except KeyboardInterrupt: + try: + shm_bool_init.close() + shm_framenumber_init.close() + shm_redframe_init.close() + + except FileNotFoundError: + # Memory already destroyed + pass + +def processing_bgr(): + shm_bool_init = shared_memory.SharedMemory(name="shm_bools") # Attach to existing shared memory block + shm_bools = np.ndarray((10,), dtype=np.bool8, buffer= shm_bool_init.buf) # do not: newframe = np.array(...)[0] --> then one can not assign new value in main script + newframe = shm_bools[0] + p_red_finished = shm_bools[1] # not used, but for clarity + p_red_started = shm_bools[4] + + shm_framenumber_init = shared_memory.SharedMemory\ + (name="shm_framenumber") # Attach to existing shared memory block + shm_framenumber = np.ndarray((1,), dtype=np.uint64, \ + buffer= shm_framenumber_init.buf) + # framenumer = shm_framenumber[0] + + shm_frame_init = shared_memory.SharedMemory\ + (name="shm_frame") # Attach to existing shared memory block + shm_redframe = np.ndarray((image_heigth,image_width, number_of_colorchannels), dtype=np.uint8, \ + buffer= shm_frame_init.buf) + image_bgr = shm_redframe + + + params_for_blob_detection = define_parameters_for_blob_detection() + + i=0 + while True: + try: + framenumber = shm_framenumber[0] + if i==0: + last_processed_frame = framenumber + + conditions_for_first_start = (i==0) and\ + (shm_bools[0] == True) and \ + (shm_bools[1] == False) and (shm_bools[2] == False) and (shm_bools[3] == False) \ + and (shm_bools[7] == True) + + conditions_for_starting_processing = (framenumber>last_processed_frame) and (shm_bools[7] == True) + # newframe and all color-channel-processings have to be finished + + if conditions_for_first_start == True: + shm_bools[4] = True # process started + shm_bools[7] = False # reset trigger + + shm_bools[1] = False # set bool for p_red_finished to false + #t1 = time.perf_counter_ns() + lane_detection(image_bgr, params_for_blob_detection) + i += 1 + #print(f"first processing red finished. frame: {framenumber}") + shm_bools[1] = True # set bool for p_red_finished to true + + shm_bools[4] = False # process ended + elif conditions_for_starting_processing == True: + shm_bools[4] = True # process started + shm_bools[7] = False # reset trigger + #print(f"red: framenumber: {framenumber}, last_processed_frame: {last_processed_frame}") + + shm_bools[1] = False # set bool for p_red_finished to false + #t1 = time.perf_counter_ns() + lane_detection(image_bgr, params_for_blob_detection) + #print(f"max frame color red: {shm_redframe.max()}") + if show_opencv_window: + cv.imshow("red", shm_redframe) + cv.waitKey(1) + #print(f"processing red finished. 
frame: {framenumber}") + shm_bools[1] = True # set bool for p_red_finished to true + #t2 = time.perf_counter_ns() + #print(f"processing time for red channel: {round((t2-t1)*1e-6,2)} ms") + + last_processed_frame = framenumber + shm_bools[4] = False # process ended + # elif shm_bools[0] == False: + # pass + #print(f"no new red frame") + + # image processing finished + + + + except KeyboardInterrupt: + try: + shm_bool_init.close() + shm_framenumber_init.close() + shm_redframe_init.close() + + except FileNotFoundError: + # Memory already destroyed + pass + +# ---------------------------------------------------------------------------- +# main +def main(): + start = time.perf_counter() + + try: + # create processes + p_red = Process(target=processing_bgr) + processes = [p_red] + + print(f"waiting 1 second to create processes") + time.sleep(1) # sind prozesse schon vorhanden + + # start acitivity of processes + for process in processes: + process.start() + + # start capturing + get_frames_from_picamera() + + + print('*******************************') + # this below code is only executed if the loop in take_image_picamera_opencv is breaked + # In real use case there will be no end of this program + + for process in processes: + process.terminate() + + # print time measuring + end = time.perf_counter() + print(f'Script finished in {round(end-start, 2)} s') + + # close each SharedMemory instance and unlink to release the shared memory + shm_bools_create.close() + shm_bools_create.unlink() + shm_framenumber_create.close() + shm_framenumber_create.unlink() + shm_redframe_create.close() + shm_redframe_create.unlink() + shm_greenframe_create.close() + shm_greenframe_create.unlink() + shm_blueframe_create.close() + shm_blueframe_create.unlink() + + except KeyboardInterrupt: + # Normally this prgoram never gets keyboard interrupted! But here this case is nevertheless handled + # End Script + try: + # close each SharedMemory instance and unlink to release the shared memory + shm_bools.close() + shm_bools.unlink() + shm_framenumber_create.close() + shm_framenumber_create.unlink() + shm_redframe_create.close() + shm_redframe_create.unlink() + shm_greenframe_create.close() + shm_greenframe_create.unlink() + shm_blueframe_create.close() + shm_blueframe_create.unlink() + + except FileNotFoundError: + # Memory already destroyed + pass + + +if __name__ == "__main__": + main() diff --git a/90_ZielSW/Multiprocessing/Lanedetection_Picamera_V03.py b/90_ZielSW/Multiprocessing/Lanedetection_Picamera_V03.py new file mode 100644 index 0000000..0f2050c --- /dev/null +++ b/90_ZielSW/Multiprocessing/Lanedetection_Picamera_V03.py @@ -0,0 +1,925 @@ + +import cv2 as cv +import numpy as np + +import picamera +from picamera.array import PiRGBArray +from fractions import Fraction + +import time +from datetime import datetime +import os +import sys + +import numpy as np +import math as M + +from multiprocessing import Process, shared_memory + + +#---------------------------------------------------------------------- +# Define camera settings +SENSOR_MODE = 4 # corresponding sensor mode to resolution +OUTPUT_RESOLUTION = (192, 144) # (width, heigth) +image_width = OUTPUT_RESOLUTION[0] +image_heigth = OUTPUT_RESOLUTION[1] +number_of_colorchannels = 3 # r, g, b +size_of_frame=int(image_heigth*image_heigth*number_of_colorchannels) +frame_dimension = int(number_of_colorchannels) + +AWB_MODE = 'off' # Auto white balance mode +AWB_GAINS = (1.395, 1.15) # White Balance Gains to have colours read correctly: (red, blue). 
Int, float or Fraction are valid.
+BRIGHTNESS = 25 # sets the brightness setting of the camera. default is 50. [0-100]
+                # the higher this value, the brighter the LEDs and the higher the RGB values, and vice versa!
+CONTRAST = 100 # sets the contrast setting of the camera. The default value is 0. [-100 ... 100]
+
+SHUTTER_SPEED = 50 # [µs]
+
+ISO = 320 # ISO value
+EXPOSURE_MODE = 'off'
+FRAMERATE = 25 # frames per second. 40 fps is max for sensor mode 4
+
+SLEEP_TIME = 2 # Time for sleep-mode for the camera in seconds. My default: 2 s
+
+# miscellaneous parameters
+max_value_of_uint64 = int((2**64) - 1) # @30 fps: conservatively calculated driving time of 1.95e10 years --> integer overflow is not relevant.
+
+#----------------------------------------------------------------------
+# Parameters
+pixels_per_mm = 32/24.25 #[px/mm] for 120 mm camera height for resolution: 192x144
+# pixels_per_mm = 71/24.25 #[px/mm] for 120 mm camera height for resolution: 416x320
+# pixels_per_mm = 107/24.25 #[px/mm] for 120 mm camera height for resolution: 640x480
+
+# Offset Camera Sensor in Scooty according to Scooty-KS
+x_offset_camera_mm = 100 # [mm]
+y_offset_camera_mm = 50 # [mm]
+x_offset_camera_px = x_offset_camera_mm*pixels_per_mm # [px]
+y_offset_camera_px = y_offset_camera_mm*pixels_per_mm # [px]
+
+# image parameters
+image_heigth = OUTPUT_RESOLUTION[1] # shape [0]
+image_width = OUTPUT_RESOLUTION[0] # shape [1]
+# calculate center of image
+[x_0, y_0] = np.array([image_width/2, image_heigth/2], dtype=np.uint16)
+
+
+threshold_color_detection = 60 # values under this will not be considered as active leds in each color channel
+
+# Parameters for Blob/LED Detection
+minDiameter_mm = 1.5 # [mm] minimum diameter of detected blob/LED
+maxDiameter_mm = 8 # [mm] maximum diameter of detected blob/LED
+
+# Define color numbers to identify the color channels in the matrix with all detected LEDs. 
No string, because numpy array should stay uint16 +color_number_off = 0 +color_number_red = 1 +color_number_green = 2 +color_number_blue = 3 +color_number_yellow = 4 +color_number_magenta = 5 +color_number_cyan = 6 +color_number_white = 7 + +show_opencv_window = True # show opencv window +draw_opencv = True # draw lane and so on + +print_additional_info = False + +# create shared Memorys for main-process +# shared memory for bools +shm_bools_pre=np.array([False, False, False, False, False, False, False, False, False, False], dtype=np.bool8) # create numpy array with bools stored in it + # [0]: newframe [1]: p_red_finished [2]: p_green_finished [3]: p_blue_finished + # [4]: p_red_started [5]: p_green_started [6]: p_blue_started + # [7]: p_red_start_trigger [8]: p_green_start_trigger [9]: p_blue_start_trigger +size_of_buffer = shm_bools_pre.nbytes +print(f"size of buffer: {size_of_buffer}") # size of buffer: 10 +print(f"shm_bools dtype: {shm_bools_pre.dtype}") # dtype: bool +shm_bools_create = shared_memory.SharedMemory(name="shm_bools", create=True, size=shm_bools_pre.nbytes) # create a new shared memory block +shm_bools = np.ndarray(shm_bools_pre.shape, dtype=shm_bools_pre.dtype, buffer=shm_bools_create.buf) # create a NumPy array backed by shared memory +shm_bools[:] = shm_bools_pre[:] # Copy the original data into shared memory + +# print(shm_bool) +# print(shm_bools.name) + +# shared memory for framenumber +shm_framenumber_pre=np.array([0], dtype=np.uint64) +size_of_buffer = shm_framenumber_pre.nbytes +print(f"size of framenumber-buffer: {size_of_buffer}") #8 +print(f"shm_framenumber dtype: {shm_framenumber_pre.dtype}") #uint64 +shm_framenumber_create = shared_memory.SharedMemory(name="shm_framenumber", create=True, size=shm_framenumber_pre.nbytes) # create a new shared memory block +shm_framenumber = np.ndarray(shm_framenumber_pre.shape, dtype=shm_framenumber_pre.dtype, buffer=shm_framenumber_create.buf) # create a NumPy array backed by shared memory +shm_framenumber[:] = shm_framenumber_pre[:] # Copy the original data into shared memory +# print(shm_framenumber) # [0] +# print(shm_framenumber_create.name) # shm_framenumber + +# shared memory for red, green, blue frame +int_black = 0 # integer for black color/ no color +shm_colorframes_pre = np.full(\ + (image_heigth,image_width), \ + int_black, dtype=np.uint8) +size_of_buffer = shm_colorframes_pre.nbytes +print(f"size of colorframe-buffer: {size_of_buffer}") #133 120 +print(f"shm_colorframes_pre dtype: {shm_colorframes_pre.dtype}") #uint8 +shm_redframe_create = shared_memory.SharedMemory(name="shm_redframe", create=True, size=shm_colorframes_pre.nbytes) # create a new shared memory block +shm_greenframe_create = shared_memory.SharedMemory(name="shm_greenframe", create=True, size=shm_colorframes_pre.nbytes) # create a new shared memory block +shm_blueframe_create = shared_memory.SharedMemory(name="shm_blueframe", create=True, size=shm_colorframes_pre.nbytes) # create a new shared memory block +shm_redframe = np.ndarray(shm_colorframes_pre.shape, dtype=shm_colorframes_pre.dtype, buffer=shm_redframe_create.buf) # create a NumPy array backed by shared memory +shm_greenframe = np.ndarray(shm_colorframes_pre.shape, dtype=shm_colorframes_pre.dtype, buffer=shm_greenframe_create.buf) # create a NumPy array backed by shared memory +shm_blueframe = np.ndarray(shm_colorframes_pre.shape, dtype=shm_colorframes_pre.dtype, buffer=shm_blueframe_create.buf) # create a NumPy array backed by shared memory +shm_redframe[:] = shm_colorframes_pre[:] # Copy 
the original data into shared memory +shm_greenframe[:] = shm_colorframes_pre[:] # Copy the original data into shared memory +shm_blueframe[:] = shm_colorframes_pre[:] # Copy the original data into shared memory +# shared memory bgr frame +int_black = 0 # integer for black color/ no color +shm_frame_pre = np.full(\ + (image_heigth,image_width, number_of_colorchannels), \ + int_black, dtype=np.uint8) +size_of_buffer = shm_frame_pre.nbytes +print(f"size of colorframe-buffer: {size_of_buffer}") #399 360 = 416 x 320 x 3 +print(f"shm_colorframes_pre dtype: {shm_frame_pre.dtype}") #uint8 +shm_frame_create = shared_memory.SharedMemory(name="shm_frame", create=True, size=shm_frame_pre.nbytes) # create a new shared memory block +shm_frame = np.ndarray(shm_frame_pre.shape, dtype=shm_frame_pre.dtype, buffer=shm_frame_create.buf) # create a NumPy array backed by shared memory +shm_frame[:] = shm_frame_pre[:] # Copy the original data into shared memory + + +# ---------------------------------------------------------------------------- +# Define Functions +def points_trafo(detected_LEDs, alpha_rad, dx, dy): + """Tranfsform points of LED to lane in KS-LED""" + detected_LEDs_trafo = detected_LEDs.copy() # copy, becuase else it is only a pointer + detected_LEDs_trafo = detected_LEDs_trafo.astype(np.int16) # avoid integer overflow + x_pnts = detected_LEDs_trafo[:,0] + y_pnts = detected_LEDs_trafo[:,1] + + # Translation + x1 = x_pnts-dx-x_0 + x_trafo=x1 + y1 = y_pnts-dy-y_0 + y_trafo = y1 + + # Rotation. Winkel Sensor im UZS, also negativ zu mathematischer definiton + x_trafo = np.cos(-alpha_rad)*x1-np.sin(-alpha_rad)*y1 + detected_LEDs_trafo[:,0] = x_trafo + y_trafo = np.sin(-alpha_rad)*x1+np.cos(-alpha_rad)*y1 + detected_LEDs_trafo[:,1] = y_trafo + + #sort points along lane: x_2, y_2 -axis (KS_LED) + detected_LEDs_trafo = detected_LEDs_trafo[detected_LEDs_trafo[:, 0].argsort(kind='quicksort')] + return detected_LEDs_trafo + +def construct_lane(detected_LEDs, img_bgr): + """construct the lane""" + # This function is partially commented in german, because higher math is used + # clearer what is trying to be achieved + + # get points + # xy_pnts = detected_LEDs[:,0:2] + # x_pnts = detected_LEDs[:,0] + # y_pnts = detected_LEDs[:,1] + + # approach 2: + # fit line through centers of LEDs in KS_0 + # DIST_L": the simplest and the fastest least-squares method: the simple euclidean distance + param = 0 # not used for DIST_L2 + reps = 0.001 # Sufficient accuracy for the radius (distance between the coordinate origin and the line). + aeps = 0.001 # Sufficient accuracy for the angle. 
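+    # Worked numeric example for the distance and foot-point formulas used below
+    # (illustrative values only): line through a = (0, 0) with direction b = (1, 0),
+    # point p = (3, 4), so c = p - a = (3, 4):
+    #   d    = |b x c| / |b|    = |1*4 - 0*3| / 1 = 4   --> distance of 4 px
+    #   t_0  = (c . b) / |b|^2  = 3
+    #   foot = a + t_0*b        = (3, 0)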
+ [dx, dy, x_2, y_2] = cv.fitLine(detected_LEDs[:,0:2], cv.DIST_L2, param, reps, aeps) + # x2, y2: same as: mean_of_leds = np.mean([x_pnts, y_pnts], 1) + + alpha_rad = np.arctan2(dy, dx) # calculate angle of line + alpha = np.arctan2(dy, dx)*180/np.pi # calculate angle of line + # print(f"Lane: dx: {dx}, dy:{dy}, x2:{x_2}, y2:{y_2}, alpha:{alpha}°") + if print_additional_info: + print(f"Lane: alpha:{alpha[0]}°") + + # get smallest distance to point an line + # Berechnung nach: Repetitorium Höhere Mathematik, Wirth + # Gerade: x = a+ t*b + # Punkt : OP = p + # d = abs(b x (p-a))/(abs(b)) + # info: np.array()[:,0] --> gets only array with 1 dimensions with desired values + p = np.array([x_0, y_0]) + a = np.array([x_2, y_2])[:,0] + b = np.array([np.cos(alpha_rad), np.sin(alpha_rad)])[:,0] # Richtungsvektor + c = p-a + + # Betrag von Vektor: np.linalg.norm(vec) + cross= np.cross(b, c) + d = np.linalg.norm(cross)/np.linalg.norm(b) # distance [px] + #print(f"d: {round(d,2)}") + + # Fußpunkt (X_LED, Y_LED) + t_0_dot = np.dot(c, b) + t_0_norm = (np.linalg.norm(b)**2) + t_0 = t_0_dot/t_0_norm + [x_LED, y_LED] = (a+t_0*b) + if print_additional_info: + print(f"x_LED: {x_LED}, y_LED: {y_LED}") + + # Abstand (dx, dy) Fußpunkt zu KS_0 + dx_LED = x_LED - x_0 + dx_LED_mm = dx_LED*(1/pixels_per_mm) + dy_LED = y_LED - y_0 + dy_LED_mm = dy_LED*(1/pixels_per_mm) + if print_additional_info: + print(f"dx_LED:{dx_LED} [px] , dy_LED:{dy_LED} [px]") + print(f"dx_LED:{dx_LED_mm} [mm] , dy_LED:{dy_LED_mm} [mm]") + + # Abstand (dx, dy) Fußpunkt von Bildmitte zu KS_Scooty + # Diese Werte zurückgeben + dx_LED_scooty = x_LED - x_0 + x_offset_camera_px + dx_LED_scooty_mm = dx_LED_scooty*(1/pixels_per_mm) + dy_LED_scooty = y_LED - y_0 + y_offset_camera_px + dy_LED_scooty_mm = dy_LED_scooty*(1/pixels_per_mm) + if print_additional_info: + print(f"dx_LED_scooty:{dx_LED_scooty} [px] , dy_LED_scooty:{dy_LED_scooty} [px]") + print(f"dx_LED_scooty:{dx_LED_scooty_mm} [mm] , dy_LED_scooty:{dy_LED_scooty_mm} [mm]") + + # Punkte Trafo, um sortierte position der LEDs entlang Spur zu erhalten + # Bei normal detected kann bei vertikaler LED zb Fehler entstehen und dann muster: 211323233 -> daher mit dieser sortierten weitermachen + detected_LEDs_KS_LED = points_trafo(detected_LEDs, alpha_rad, dx_LED, dy_LED) + if print_additional_info: + print(f"Detected LEDs in KS_LED:(x2, y2):\n {detected_LEDs_KS_LED}") + + #----------------------------------- + # draw useful lines and points + + # draw lane line + if draw_opencv: + pt_0 = (a+b*np.array([-300, -300])).astype(np.int32) + pt_1 = (a+b*np.array([300, 300])).astype(np.int32) + #print(f"pt_0: {pt_0}, pt_1: {pt_1}") + cv.line(img_bgr, pt_0, pt_1, (255,255,255),1) # draw lane + + # draw dx dy + cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_LED)), (0,0,255), 2) # shortest distance from KS_0 to KS_LED --> Lot + # cv.line(img_bgr, (int(x_0), int(y_0)), (int(x_LED), int(y_0)), (0,0,255), 2) # only dx + # cv.line(img_bgr, (int(x_LED), int(y_0)), (int(x_LED), int(y_LED)), (0,0,255), 2) # only dy + + #draw additional points + cv.circle(img_bgr, (int(x_2), int(y_2)), 5,(255,128,255),-1) #pink. Center of points + #cv.putText(img_bgr, '(x2, y2)',(int(x_2)+5, int(y_2)-5), cv.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), cv.LINE_AA) + cv.circle(img_bgr, (int(x_LED), int(y_LED)), 5,(170,255,0),-1) # lime green. 
Fußpunkt + + if show_opencv_window: + cv.imshow("Lane", img_bgr) + + return dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED + +def convert_rgb_to_grayscale_average(image_bgr): + """This function converts the RGB image into an grayscale image. + Algorithm: Average: Y = (R+G+B)/3""" + + # convert dtype to prevent integer overflow while addition + image_bgr = image_bgr.astype(np.uint16, copy=False) + image_gray = (image_bgr[:,:,0]+image_bgr[:,:,1]+image_bgr[:,:,2])/3 # add values / do conversion + image_gray = image_gray.astype(np.uint8, copy=False) # convert back to uint8 + + return image_gray + +def create_detector(params_for_blob_detection): + detector = cv.SimpleBlobDetector_create(params_for_blob_detection) # Set up the detector with specified parameters. + return detector + +def define_parameters_for_blob_detection(): + """set parameters for simple blob detector""" + params = cv.SimpleBlobDetector_Params() + + # Threshold for Convert the source image to binary images by applying thresholding + # with several thresholds from minThreshold (inclusive) to maxThreshold (exclusive) + # with distance thresholdStep between neighboring thresholds. + # Since the Grayscale image is dark if only one color channel is active, + # the Threshold values have to be set like this. + # particularly the thresholdStep-Value has to be low + params.minThreshold=20 # reminder: this value is set for grayscale image + params.maxThreshold=255 + params.thresholdStep=1 + + params.filterByColor=False # do not filter blobs by color + + # Filter blobs by Area + params.filterByArea=True + minDiameter_px = minDiameter_mm*pixels_per_mm # [px] minimum diameter of detected blob/LED + maxDiameter_px = maxDiameter_mm*pixels_per_mm # [px] maximum diameter of detected blob/LED + minArea_px2 = np.pi/4*minDiameter_px**2 + maxArea_px2 = np.pi/4*maxDiameter_px**2 + + params.minArea = minArea_px2 # min Area of a blob in px^2 + params.maxArea = maxArea_px2 # max Area of a blob in px^2. 
+
+def create_detector(params_for_blob_detection):
+    detector = cv.SimpleBlobDetector_create(params_for_blob_detection)  # set up the detector with the specified parameters
+    return detector
+
+def define_parameters_for_blob_detection():
+    """Set the parameters for the simple blob detector."""
+    params = cv.SimpleBlobDetector_Params()
+
+    # Thresholds for converting the source image to binary images, applying thresholding
+    # with several thresholds from minThreshold (inclusive) to maxThreshold (exclusive)
+    # with distance thresholdStep between neighbouring thresholds.
+    # Since the grayscale image is dark if only one colour channel is active,
+    # the threshold values have to be set like this.
+    # In particular the thresholdStep value has to be low.
+    params.minThreshold = 20  # reminder: this value is set for the grayscale image
+    params.maxThreshold = 255
+    params.thresholdStep = 1
+
+    params.filterByColor = False  # do not filter blobs by colour
+
+    # Filter blobs by area
+    params.filterByArea = True
+    minDiameter_px = minDiameter_mm*pixels_per_mm  # [px] minimum diameter of a detected blob/LED
+    maxDiameter_px = maxDiameter_mm*pixels_per_mm  # [px] maximum diameter of a detected blob/LED
+    minArea_px2 = np.pi/4*minDiameter_px**2
+    maxArea_px2 = np.pi/4*maxDiameter_px**2
+
+    params.minArea = minArea_px2  # min area of a blob in px^2
+    params.maxArea = maxArea_px2  # max area of a blob in px^2
+    # argument against filtering by maxArea: motion blur + rolling shutter --> larger area
+
+    # Filter by inertia
+    params.filterByInertia = False
+    params.minInertiaRatio = 0.2  # [0-1]
+
+    # Filter by convexity
+    params.filterByConvexity = False
+    params.minConvexity = 0.2  # [0-1]
+
+    # Filter by circularity
+    params.filterByCircularity = False
+    params.minCircularity = 0.4  # [0-1]
+
+    # params.minDistBetweenBlobs = minDist_px  # this has no effect
+
+    return params
+
+
+def detect_LED_positions_in_grayscale(image_gray, image_bgr, detector):
+    start_processing = time.perf_counter()
+    keypoints = detector.detect(image_gray)  # detect blobs --> LEDs
+    end_processing = time.perf_counter()
+    time_processing = (end_processing-start_processing)*1000
+    time_processing = round(time_processing, 2)
+    print(f'processing time detector: {time_processing} ms')
+
+    number_of_detected_leds = len(keypoints)
+
+    if number_of_detected_leds != 0:
+        # print information about the keypoints
+        print(f"detected LEDs: {number_of_detected_leds}")
+
+        # pre-allocate the numpy matrix
+        number_of_rows = number_of_detected_leds
+        number_of_columns = 3
+        position_of_leds = np.zeros((number_of_rows, number_of_columns), dtype=np.uint16)
+        for i, k in enumerate(keypoints):
+            # x_pos = round(k.pt[0],0)  # x position
+            # y_pos = round(k.pt[1],0)  # y position
+            # print(f"x: {x_pos} y: {y_pos}")
+            diameter_px = round(k.size, 2)
+            diameter_mm = round(diameter_px*1/pixels_per_mm, 2)
+            # print(f"diameter [px]: {diameter_px} diameter [mm]: {diameter_mm}")  # diameter
+            # area_px2 = round(np.pi/4*k.size**2,0)  # area in px^2
+            # area_mm2 = round(area_px2*(1/pixels_per_mm)**2,0)
+            # print(f"area [px^2]: {area_px2} area [mm^2]: {area_mm2}")
+            # print('')
+
+            # fill the matrix
+            # position_of_leds[i,:] = [x_pos, y_pos, 0]
+            position_of_leds[i,0] = int(np.ceil(k.pt[0]))  # x position
+            position_of_leds[i,1] = int(np.ceil(k.pt[1]))  # y position
+
+        if draw_opencv:
+            # draw the keypoints on the original image.
+            # cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of the blob
+            blobs = cv.drawKeypoints(image=image_bgr, keypoints=keypoints, color=(255, 255, 255), \
+                outImage=np.array([]), flags=cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
+
+            if show_opencv_window:  # guarded by draw_opencv, because "blobs" only exists then
+                # cv.imshow("grayscale", image_gray)
+                cv.imshow("Detected", blobs)
+        return position_of_leds
+
+    else:
+        print("No LEDs were detected")
+        return None
+
+def detect_position_of_all_LEDs_grayscale(image_gray, image_bgr, detector):
+    position_of_LEDs = detect_LED_positions_in_grayscale(image_gray, image_bgr, detector)
+    return position_of_LEDs  # None if no LEDs were detected
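+
+# Quick self-contained check (illustration only, not called by the script): run the
+# configured detector on a synthetic frame with a single bright disc. It relies on
+# the module-level names minDiameter_mm, maxDiameter_mm and pixels_per_mm used by
+# define_parameters_for_blob_detection(); one detection near (100, 100) is expected
+# if the configured area limits admit a 20 px blob.
+def _example_blob_detection():
+    frame = np.zeros((200, 200, 3), dtype=np.uint8)
+    cv.circle(frame, (100, 100), 10, (0, 0, 255), -1)  # one red "LED", 20 px diameter
+    gray = convert_rgb_to_grayscale_average(frame)
+    detector = create_detector(define_parameters_for_blob_detection())
+    keypoints = detector.detect(gray)
+    print([k.pt for k in keypoints])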
+
+def get_color_of_leds(matrix_of_LEDs, image_bgr):
+    # is image_r[y_pos, x_pos] = image_bgr[y_pos, x_pos, 2]? --> yes. No need to split the colour channels.
+
+    offset = 0  # half the side length of the square region used to determine the colour around the centre point of the blob/LED
+    # offset = 0 --> only the value of the centre point of the blob/LED
+    # offset = 1 --> 9 values, offset = 2 --> 25 values
+
+    for led in matrix_of_LEDs:
+        x_pos = led[0]  # uint16
+        y_pos = led[1]  # uint16
+
+        # get the values of the colour channels in the region around the centre point of the blob/LED:
+        # +1 at the stop index, because it is not inclusive
+        region_around_blue_led = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, 0]  # uint8
+        region_around_green_led = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, 1]  # uint8
+        region_around_red_led = image_bgr[y_pos-offset:y_pos+offset+1, x_pos-offset:x_pos+offset+1, 2]  # uint8
+
+        # average of the values:
+        # convert the dtype to prevent integer overflow during the addition
+        region_around_red_led = region_around_red_led.astype(np.uint16, copy=False)
+        region_around_green_led = region_around_green_led.astype(np.uint16, copy=False)
+        region_around_blue_led = region_around_blue_led.astype(np.uint16, copy=False)
+        # sum all elements of the matrix and divide by the number of elements
+        number_of_elements = region_around_blue_led.size
+        value_of_red_led = region_around_red_led.sum()/number_of_elements      # float64, if not an integer result
+        value_of_green_led = region_around_green_led.sum()/number_of_elements  # float64, if not an integer result
+        value_of_blue_led = region_around_blue_led.sum()/number_of_elements    # float64, if not an integer result
+
+        # determine which LEDs are active:
+        # if value > threshold --> LED is active
+        status_red_led = value_of_red_led > threshold_color_detection
+        status_green_led = value_of_green_led > threshold_color_detection
+        status_blue_led = value_of_blue_led > threshold_color_detection
+
+        # determine the colour by checking the cases:
+        # case 1: red
+        if not status_blue_led and not status_green_led and status_red_led:
+            color = color_number_red
+        # case 2: green
+        elif not status_blue_led and status_green_led and not status_red_led:
+            color = color_number_green
+        # case 3: blue
+        elif status_blue_led and not status_green_led and not status_red_led:
+            color = color_number_blue
+        # case 4: yellow = red + green
+        elif not status_blue_led and status_green_led and status_red_led:
+            color = color_number_yellow
+        # case 5: magenta = red + blue
+        elif status_blue_led and not status_green_led and status_red_led:
+            color = color_number_magenta
+        # case 6: cyan = green + blue
+        elif status_blue_led and status_green_led and not status_red_led:
+            color = color_number_cyan
+        # case 7: white = red + green + blue
+        elif status_blue_led and status_green_led and status_red_led:
+            color = color_number_white
+        # case 8: LED not active.
+        # This case cannot occur, because the blob algorithm in detect_LED_positions_in_grayscale cannot detect an inactive LED.
+        else:
+            color = color_number_off
+
+        # fill the matrix with the colour
+        led[2] = color  # uint16
+
+    return matrix_of_LEDs
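+
+# Equivalent formulation (sketch, not used above): the case analysis in
+# get_color_of_leds is a truth table over the three channel states, so it can also
+# be written as a dict lookup. The color_number_* names are the module-level colour
+# codes already used above; _classify_led_color itself is hypothetical.
+def _classify_led_color(status_red, status_green, status_blue):
+    color_lookup = {
+        # (red, green, blue) channel active --> colour code
+        (True,  False, False): color_number_red,
+        (False, True,  False): color_number_green,
+        (False, False, True):  color_number_blue,
+        (True,  True,  False): color_number_yellow,
+        (True,  False, True):  color_number_magenta,
+        (False, True,  True):  color_number_cyan,
+        (True,  True,  True):  color_number_white,
+    }
+    # falls back to color_number_off for (False, False, False), as in case 8 above
+    return color_lookup.get((status_red, status_green, status_blue), color_number_off)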
+
+def detect_LEDs_with_grayscale(image_bgr, detector):
+    # convert the BGR image to a grayscale image
+    # start_m1 = time.perf_counter()
+    image_gray = convert_rgb_to_grayscale_average(image_bgr)
+    # end_m1 = time.perf_counter()
+    # time_processing = round((end_m1-start_m1)*1000, 2)
+    # print(f'processing time conversion: {time_processing} ms')
+
+    # get the positions of the LEDs
+    position_of_LEDs = detect_position_of_all_LEDs_grayscale(image_gray=image_gray, image_bgr=image_bgr, detector=detector)
+
+    # position_of_LEDs = None
+
+    if position_of_LEDs is not None:
+        # determine the colour of the LEDs and add it to the matrix
+        detected_LEDs = get_color_of_leds(position_of_LEDs, image_bgr)
+        return detected_LEDs
+    else:
+        return None
+
+
+def lane_detection(image_bgr, detector):
+    # detect LEDs
+    print("Detect LEDs and color:")
+    detected_LEDs = detect_LEDs_with_grayscale(image_bgr, detector)
+
+    if detected_LEDs is not None:
+        # construct the lane
+        # print(f"_____________________________________")
+        # print("Construct lane")
+        dx_LED_scooty_mm, dy_LED_scooty_mm, detected_LEDs_KS_LED = \
+            construct_lane(detected_LEDs, image_bgr)
+
+        # print the result
+        if print_additional_info:
+            print(f"Detected LEDs relative to the image center (x0, y0):\n{detected_LEDs}")
+        return detected_LEDs
+    else:
+        return None
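+
+# Usage sketch (illustration only): run the detection pipeline once on a single
+# image from disk instead of the camera stream. The file name is a placeholder.
+def _example_lane_detection_on_file():
+    image_bgr = cv.imread("example_frame.png", cv.IMREAD_COLOR)  # hypothetical test image
+    detector = create_detector(define_parameters_for_blob_detection())
+    detected_LEDs = lane_detection(image_bgr, detector)
+    print(detected_LEDs)  # matrix of (x, y, colour code) per LED, or None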
+
+# -------------------
+# Define Functions
+def get_frames_from_picamera():
+    # newframe = shm_bools[0]  # do not use this! no updated values in "newframe"
+    # framenumber = shm_framenumber[0]
+
+    # initialise the camera
+    with picamera.PiCamera() as camera:
+        with PiRGBArray(camera) as output:
+            # set the camera settings
+            camera.sensor_mode = SENSOR_MODE  # force the camera into the desired sensor mode
+            camera.resolution = OUTPUT_RESOLUTION  # the frame will be resized to this resolution by the GPU. No CPU usage!
+            camera.framerate = FRAMERATE
+
+            camera.awb_mode = AWB_MODE
+            camera.awb_gains = AWB_GAINS
+
+            camera.iso = ISO
+            camera.shutter_speed = SHUTTER_SPEED
+
+            time.sleep(SLEEP_TIME)  # wait for iso, digital_gain and analog_gain to settle before fixing the gains with exposure_mode = off
+            camera.exposure_mode = EXPOSURE_MODE
+
+            time.sleep(1)  # wait before applying brightness and contrast
+            camera.brightness = BRIGHTNESS
+            camera.contrast = CONTRAST
+            time.sleep(SLEEP_TIME)  # camera warm-up time to apply the settings
+
+            t_start = time.perf_counter()  # save the time for the fps calculation
+
+            for frameidx, frame in enumerate(camera.capture_continuous(output, format='bgr', use_video_port=True)):
+                # General information:
+                # - always the newest frame is received: processing must be faster than the fps if every frame should be processed
+
+                shm_bools[0] = True
+                framenumber = frameidx+1  # frameidx starts with 0, framenumber with 1
+
+                shm_framenumber[0] = framenumber
+                # print('')
+                # print(f"new frame: {framenumber}")
+
+                # image = frame.array  # raw NumPy array without JPEG encoding
+
+                # b,g,r = cv.split(frame.array)  # split the colour channels of the raw NumPy array without JPEG encoding
+                shm_frame[:] = frame.array
+                # shm_redframe[:] = r
+                # shm_greenframe[:] = g
+                # shm_blueframe[:] = b
+                # for better performance one could assign the values to the shared memories directly in one line: shm_red, .., .. = cv.split(..)
+
+                shm_bools[7:10] = True  # trigger the start of the processing for each colour channel
+                # print(shm_bools[7], shm_bools[8], shm_bools[9])
+
+                # display_image_with_text(image, shutter_speed, framenumber, camera_exposure_speed, trigger_record_OpenCV, out)  # show the frame
+
+                output.truncate(0)  # clear the stream for the next frame
+
+                if framenumber == 500:  # stop after 500 frames; only for performance measuring
+                    t_stop = time.perf_counter()
+                    print(f"calculated fps: {framenumber/(t_stop-t_start)}")
+                    break
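+
+# Background sketch (not part of the script): how a NumPy array can be backed by a
+# SharedMemory block, which is what the shm_* names above rely on. The producer
+# creates the block once; consumers attach to it by name, as processing_red() does
+# below. The name and shape here are placeholders.
+def _example_shared_frame():
+    shm = shared_memory.SharedMemory(name="shm_example", create=True, size=64*64*3)
+    frame = np.ndarray((64, 64, 3), dtype=np.uint8, buffer=shm.buf)
+    frame[:] = 0  # writes go directly into the shared block, visible to other processes
+    shm.close(); shm.unlink()  # release the block again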
+
+def display_image_with_text(img, shutter_speed, framenumber, camera_exposure_speed, trigger_record_OpenCV, out):
+    img = img.copy()  # make a copy of the image and do not modify the original image
+
+    # please activate only one trigger at once
+    trigger_show_brightness = 0  # trigger for (not) calculating and showing the brightness of the image
+    if trigger_show_brightness == 1:
+        arithmetic_mean_of_brightness_per_pixel_relative = calc_arithmetic_mean_of_brightness_per_pixel(img)
+
+    trigger_show_max_brightness_values_of_colour_channels = 0  # trigger for (not) calculating and showing the max values of the colour channels
+    if trigger_show_max_brightness_values_of_colour_channels == 1:
+        r_max, g_max, b_max = get_max_rgb_values(img)
+
+    font = cv.FONT_HERSHEY_SIMPLEX  # font
+    fontScale = 1  # fontScale
+    color = (255, 255, 255)  # font colour in BGR
+    thickness = 1  # line thickness in px
+
+    # set the text position
+    frame_width = int(img.shape[1])
+    frame_height = int(img.shape[0])
+    text_start_position_Y = int(round(frame_height*0.12))  # start position of the text in pixels: 12 % of the frame height
+    text_linespacing = 50  # line spacing between two strings in pixels
+    # text_start_position_X = int(frame_width/4)  # start the text from 1/4 of the image width
+    text_start_position_X = int(0)  # start the text from the left edge of the image
+
+    # set the positions as (x,y)-coordinates from the top left corner: bottom-left corner of the text string in the image
+    pos_1 = (text_start_position_X, text_start_position_Y)
+    pos_2 = (text_start_position_X, text_start_position_Y+text_linespacing)
+    pos_3 = (text_start_position_X, text_start_position_Y+2*text_linespacing)
+    if trigger_show_brightness == 1 or trigger_show_max_brightness_values_of_colour_channels == 1:
+        pos_4 = (text_start_position_X, text_start_position_Y+3*text_linespacing)
+
+    # define the text to display
+    text_line_1 = f"set ss: {shutter_speed} us"
+    text_line_3 = f"Frame: {framenumber}"
+    text_line_2 = f"ret exs: {camera_exposure_speed} us"
+    if trigger_show_brightness == 1:
+        if arithmetic_mean_of_brightness_per_pixel_relative >= 0.01:
+            text_line_4 = f"brightness: {round(arithmetic_mean_of_brightness_per_pixel_relative*100,2)} %"
+        elif arithmetic_mean_of_brightness_per_pixel_relative < 0.01:
+            text_line_4 = f"brightness: {round(arithmetic_mean_of_brightness_per_pixel_relative*10e3,2)} pm"
+    if trigger_show_max_brightness_values_of_colour_channels == 1:
+        text_line_4 = f"max: r:{r_max} g:{g_max} b:{b_max}"
+
+    # put the text into the image
+    # image_text_1 = cv.putText(img, text_line_1, pos_1, font,
+    #                           fontScale, color, thickness, cv.LINE_AA)
+    # image_text_2 = cv.putText(img, text_line_2, pos_2, font,
+    #                           fontScale, color, thickness, cv.LINE_AA)
+    # image_text_3 = cv.putText(img, text_line_3, pos_3, font,
+    #                           fontScale, color, thickness, cv.LINE_AA)
+    if trigger_show_brightness == 1 or trigger_show_max_brightness_values_of_colour_channels == 1:
+        image_text_4 = cv.putText(img, text_line_4, pos_4, font,
+                                  fontScale, color, thickness, cv.LINE_AA)
+
+    cv.imshow("Current Frame", img)  # display the image
+    if trigger_record_OpenCV == 1:
+        out.write(img)  # write the frame to the video
+
+
+def calc_arithmetic_mean_of_brightness_per_pixel(image):
+    """Calculate the mean brightness per pixel of the image."""
+    # Comment: so computationally expensive that no flashing (or only very rare/weak flashing)
+    # is visible in the image. Therefore the max values are displayed instead.
+
+    b, g, r = cv.split(image)  # OpenCV works with BGR. The image is also specified to be captured in BGR.
+    r = r.astype('uint16')  # set the dtype of one colour to uint16, because the sum 255+255+255 = 765 > 255
+    # the rest will then also be uint16
+    image_heigth = r.shape[0]
+    image_width = r.shape[1]
+
+    number_of_colour_channels = 3
+    arithmetic_mean_of_brightness_image = np.sum((r+g+b)/number_of_colour_channels)
+    arithmetic_mean_of_brightness_per_pixel = arithmetic_mean_of_brightness_image/(image_width*image_heigth)
+
+    max_possible_brightness = 255  # maximum possible brightness
+    arithmetic_mean_of_brightness_per_pixel_relative = arithmetic_mean_of_brightness_per_pixel/max_possible_brightness
+
+    return arithmetic_mean_of_brightness_per_pixel_relative
+
+def get_max_rgb_values(image):
+    """Get the max values of the colour channels."""
+    b, g, r = cv.split(image)  # OpenCV works with BGR. The image is also specified to be captured in BGR.
+
+    r_max = r.max()
+    g_max = g.max()
+    b_max = b.max()
+
+    return r_max, g_max, b_max
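+
+# Aside (sketch, not used above): the same maxima can be read without splitting the
+# channels, since the per-channel maximum is just a reduction over both image axes.
+def _example_max_rgb_without_split(image_bgr):
+    b_max, g_max, r_max = image_bgr.max(axis=(0, 1))  # BGR channel order
+    return r_max, g_max, b_max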
+
+def create_folder_for_captures():
+    # create a folder for saving the captured pictures
+    now = datetime.now(); d1 = now.strftime("%Y-%m-%d %H-%M")
+    path_cwd = os.getcwd()
+
+    path_saveFolder = path_cwd+r"/Capture_"+d1
+    try:
+        os.mkdir(path_saveFolder)
+        folder_exists = True
+    except OSError:
+        print("Error! Ending script.")
+        quit()
+
+    return path_saveFolder, folder_exists
+
+def do_processing():
+    time.sleep(0.001)
+    # print("ohh i am doing high complex image analysis with computer vision")
+
+def do_processing_frame_r(frame):
+    print(f"max frame color red: {frame.max()}")
+def do_processing_frame_g(frame):
+    print(f"max frame color green: {frame.max()}")
+def do_processing_frame_b(frame):
+    print(f"max frame color blue: {frame.max()}")
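+
+# Overview of the shm_bools flag layout, reconstructed from its uses in this script
+# and added here as documentation. Indices 2/3 and 5/6 are presumably the green/blue
+# counterparts of the red flags at indices 1 and 4:
+#   shm_bools[0]     new frame available
+#   shm_bools[1:4]   processing finished per colour channel (red, green, blue)
+#   shm_bools[4:7]   processing started per colour channel (red, green, blue)
+#   shm_bools[7:10]  trigger to start the processing per colour channel (red, green, blue)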
+
+def processing_red():
+    shm_bool_init = shared_memory.SharedMemory(name="shm_bools")  # attach to the existing shared memory block
+    shm_bools = np.ndarray((10,), dtype=np.bool8, buffer=shm_bool_init.buf)  # do not do: newframe = np.array(...)[0] --> then one cannot assign a new value in the main script
+    newframe = shm_bools[0]
+    p_red_finished = shm_bools[1]  # not used, but for clarity
+    p_red_started = shm_bools[4]
+
+    shm_framenumber_init = shared_memory.SharedMemory\
+        (name="shm_framenumber")  # attach to the existing shared memory block
+    shm_framenumber = np.ndarray((1,), dtype=np.uint64, \
+        buffer=shm_framenumber_init.buf)
+    # framenumber = shm_framenumber[0]
+
+    shm_redframe_init = shared_memory.SharedMemory\
+        (name="shm_redframe")  # attach to the existing shared memory block
+    shm_redframe = np.ndarray((image_heigth, image_width), dtype=np.uint8, \
+        buffer=shm_redframe_init.buf)
+
+    i = 0
+    while True:
+        try:
+            framenumber = shm_framenumber[0]
+            if i == 0:
+                last_processed_frame = framenumber
+
+            conditions_for_first_start = (i == 0) and\
+                (shm_bools[0] == True) and \
+                (shm_bools[1] == False) and (shm_bools[2] == False) and (shm_bools[3] == False) \
+                and (shm_bools[7] == True)
+
+            conditions_for_starting_processing = (framenumber > last_processed_frame) and (shm_bools[7] == True)
+            # a new frame must be there and all colour-channel processings have to be finished
+
+            if conditions_for_first_start == True:
+                shm_bools[4] = True  # process started
+                shm_bools[7] = False  # reset the trigger
+
+                shm_bools[1] = False  # set the bool for p_red_finished to False
+                # t1 = time.perf_counter_ns()
+                do_processing()
+                i += 1
+                # print(f"first processing red finished. frame: {framenumber}")
+                shm_bools[1] = True  # set the bool for p_red_finished to True
+
+                shm_bools[4] = False  # process ended
+            elif conditions_for_starting_processing == True:
+                shm_bools[4] = True  # process started
+                shm_bools[7] = False  # reset the trigger
+                # print(f"red: framenumber: {framenumber}, last_processed_frame: {last_processed_frame}")
+
+                shm_bools[1] = False  # set the bool for p_red_finished to False
+                # t1 = time.perf_counter_ns()
+                do_processing()
+                # print(f"max frame color red: {shm_redframe.max()}")
+                if show_opencv_window:
+                    cv.imshow("red", shm_redframe)
+                    cv.waitKey(1)
+                # print(f"processing red finished. frame: {framenumber}")
+                shm_bools[1] = True  # set the bool for p_red_finished to True
+                # t2 = time.perf_counter_ns()
+                # print(f"processing time for red channel: {round((t2-t1)*1e-6,2)} ms")
+
+                last_processed_frame = framenumber
+                shm_bools[4] = False  # process ended
+            # elif shm_bools[0] == False:
+            #     pass  # print(f"no new red frame")
+
+            # image processing finished
+
+        except KeyboardInterrupt:
+            try:
+                shm_bool_init.close()
+                shm_framenumber_init.close()
+                shm_redframe_init.close()
+            except FileNotFoundError:
+                # memory already destroyed
+                pass
+
+def processing_bgr():
+    shm_bool_init = shared_memory.SharedMemory(name="shm_bools")  # attach to the existing shared memory block
+    shm_bools = np.ndarray((10,), dtype=np.bool8, buffer=shm_bool_init.buf)  # do not do: newframe = np.array(...)[0] --> then one cannot assign a new value in the main script
+    newframe = shm_bools[0]
+    p_red_finished = shm_bools[1]  # not used, but for clarity
+    p_red_started = shm_bools[4]
+
+    shm_framenumber_init = shared_memory.SharedMemory\
+        (name="shm_framenumber")  # attach to the existing shared memory block
+    shm_framenumber = np.ndarray((1,), dtype=np.uint64, \
+        buffer=shm_framenumber_init.buf)
+    # framenumber = shm_framenumber[0]
+
+    shm_frame_init = shared_memory.SharedMemory\
+        (name="shm_frame")  # attach to the existing shared memory block
+    shm_frame = np.ndarray((image_heigth, image_width, number_of_colorchannels), dtype=np.uint8, \
+        buffer=shm_frame_init.buf)
+    image_bgr = shm_frame
+
+    params_for_blob_detection = define_parameters_for_blob_detection()
+    detector = create_detector(params_for_blob_detection)
+
+    i = 0
+    while True:
+        try:
+            framenumber = shm_framenumber[0]
+            if i == 0:
+                last_processed_frame = framenumber
+
+            conditions_for_first_start = (i == 0) and\
+                (shm_bools[0] == True) and \
+                (shm_bools[1] == False) and (shm_bools[2] == False) and (shm_bools[3] == False) \
+                and (shm_bools[7] == True)
+
+            conditions_for_starting_processing = (framenumber > last_processed_frame) and (shm_bools[7] == True)
+            # a new frame must be there and all colour-channel processings have to be finished
+
+            if conditions_for_first_start == True:
+                print("first start")
+                shm_bools[4] = True  # process started
+                shm_bools[7] = False  # reset the trigger
+
+                shm_bools[1] = False  # set the bool for p_red_finished to False
+                # t1 = time.perf_counter_ns()
+                lane_detection(image_bgr, detector)  # the image here can already be outdated --> the processing is slower than new images arrive
+                i += 1
+                # print(f"first processing finished. frame: {framenumber}")
+                shm_bools[1] = True  # set the bool for p_red_finished to True
+
+                shm_bools[4] = False  # process ended
+            elif conditions_for_starting_processing == True:
+                shm_bools[4] = True  # process started
+                shm_bools[7] = False  # reset the trigger
+                # print(f"framenumber: {framenumber}, last_processed_frame: {last_processed_frame}")
+
+                shm_bools[1] = False  # set the bool for p_red_finished to False
+                # t1 = time.perf_counter_ns()
+                cv.imshow("AAA", shm_frame)
+                lane_detection(image_bgr, detector)
+                if show_opencv_window:
+                    cv.waitKey(1)
+                # print(f"processing finished. frame: {framenumber}")
+                shm_bools[1] = True  # set the bool for p_red_finished to True
+                # t2 = time.perf_counter_ns()
+                # print(f"processing time: {round((t2-t1)*1e-6,2)} ms")
+
+                last_processed_frame = framenumber
+                shm_bools[4] = False  # process ended
+            # elif shm_bools[0] == False:
+            #     pass  # print(f"no new frame")
+
+            # image processing finished
+
+        except KeyboardInterrupt:
+            try:
+                shm_bool_init.close()
+                shm_framenumber_init.close()
+                shm_frame_init.close()
+            except FileNotFoundError:
+                # memory already destroyed
+                pass
+
+# ----------------------------------------------------------------------------
+# main
+def main():
+    start = time.perf_counter()
+
+    try:
+        # create the processes
+        p_bgr = Process(target=processing_bgr)
+        processes = [p_bgr]
+
+        print("waiting 1 second to create processes")
+        time.sleep(1)  # so that the processes are already created before capturing starts
+
+        # start the activity of the processes
+        for process in processes:
+            process.start()
+
+        # start capturing
+        get_frames_from_picamera()
+
+        print('*******************************')
+        # the code below is only executed when the loop in get_frames_from_picamera breaks.
+        # In the real use case there will be no end of this program.
+
+        for process in processes:
+            process.terminate()
+
+        # print the time measurement
+        end = time.perf_counter()
+        print(f'Script finished in {round(end-start, 2)} s')
+
+        # close each SharedMemory instance and unlink it to release the shared memory
+        shm_bools_create.close()
+        shm_bools_create.unlink()
+        shm_framenumber_create.close()
+        shm_framenumber_create.unlink()
+        shm_redframe_create.close()
+        shm_redframe_create.unlink()
+        shm_greenframe_create.close()
+        shm_greenframe_create.unlink()
+        shm_blueframe_create.close()
+        shm_blueframe_create.unlink()
+
+    except KeyboardInterrupt:
+        # normally this program never gets keyboard-interrupted! But this case is handled here nevertheless.
+        # end the script
+        try:
+            # close each SharedMemory instance and unlink it to release the shared memory
+            shm_bools_create.close()
+            shm_bools_create.unlink()
+            shm_framenumber_create.close()
+            shm_framenumber_create.unlink()
+            shm_redframe_create.close()
+            shm_redframe_create.unlink()
+            shm_greenframe_create.close()
+            shm_greenframe_create.unlink()
+            shm_blueframe_create.close()
+            shm_blueframe_create.unlink()
+
+        except FileNotFoundError:
+            # memory already destroyed
+            pass
+
+
+if __name__ == "__main__":
+    main()
diff --git a/Installationsanleitung.docx b/Installationsanleitung.docx
new file mode 100644
index 0000000..b5f5caf
Binary files /dev/null and b/Installationsanleitung.docx differ
diff --git a/Installationsanleitung.pdf b/Installationsanleitung.pdf
new file mode 100644
index 0000000..804bf5a
Binary files /dev/null and b/Installationsanleitung.pdf differ