diff --git a/custom_components/valetudo_vacuum_camera/camera.py b/custom_components/valetudo_vacuum_camera/camera.py
index f88f207a..f067256a 100644
--- a/custom_components/valetudo_vacuum_camera/camera.py
+++ b/custom_components/valetudo_vacuum_camera/camera.py
@@ -6,11 +6,13 @@
 from __future__ import annotations

-from datetime import datetime, timedelta
+import time
+from datetime import timedelta
 from io import BytesIO
 import json
 import logging
 import os
+import gc
 from typing import Optional

 from PIL import Image
@@ -272,6 +274,8 @@ def __init__(self, hass, device_info):
         except (ValueError, IndexError, UnboundLocalError) as e:
             _LOGGER.error("Error while populating colors: %s", e)
         self.processor = CameraProcessor(self._shared)
+        # Enable collector diagnostics while hunting the memory leak.
+        gc.set_debug(gc.DEBUG_LEAK)

     async def async_added_to_hass(self) -> None:
         """Handle entity added to Home Assistant."""
@@ -302,9 +306,11 @@ def name(self) -> str:
         return self._attr_name

     def turn_on(self):
+        self._should_poll = True

     def turn_off(self):
+        gc.collect(2)
         self._should_poll = False

     @property
@@ -418,130 +424,138 @@ async def async_update(self):
         self._shared.vacuum_state = await self._mqtt.get_vacuum_status()
         process_data = await self._mqtt.is_data_available(self._processing)
         if process_data:
-            self._processing = True
-            # if the vacuum is working, or it is the first image.
-            if (
-                self._shared.vacuum_state == "cleaning"
-                or self._shared.vacuum_state == "moving"
-                or self._shared.vacuum_state == "returning"
-            ):
-                # grab the image
-                self._shared.image_grab = True
-                self._shared.frame_number = self.processor.get_frame_number()
-                # when the vacuum goes / is in cleaning, moving or returning
-                # do not take the automatic snapshot.
-                self._shared.snapshot_take = False
-                _LOGGER.info(
-                    f"{self._shared.file_name}: Camera image data update available: {process_data}"
-                )
-            # to calculate the cycle time for frame adjustment.
-            start_time = datetime.now()
-            pid = os.getpid()  # Start to log the CPU usage of this PID.
-            proc = ProcInsp().psutil.Process(pid)  # Get the process PID.
-            self._cpu_percent = round(
-                (proc.cpu_percent() / ProcInsp().psutil.cpu_count()) / 2,
-                2,
-            )
-            try:
-                parsed_json = await self._mqtt.update_data(self._shared.image_grab)
-                if parsed_json[1]:
-                    self._shared.is_rand = True
-                    self._rrm_data = parsed_json[0]
-                else:
-                    parsed_json = parsed_json[0]
-                    self._rrm_data = None
-                # Below bypassed code is for debug purpose only
-                #########################################################
-                # parsed_json = await self.load_test_json(
-                #    "custom_components/valetudo_vacuum_camera/snapshots/test.json")
-                ##########################################################
-                self._vac_json_available = "Success"
-            except ValueError:
-                self._vac_json_available = "Error"
-                pass
-            else:
-                # Just in case, let's check that the data is available
-                pid = os.getpid()  # Start to log the CPU usage of this PID.
-                proc = ProcInsp().psutil.Process(pid)  # Get the process PID.
-                self._cpu_percent = round(
-                    (proc.cpu_percent() / ProcInsp().psutil.cpu_count()) / 2,
-                    2,
-                )
-                if parsed_json is not None:
-                    if self._rrm_data:
-                        self._shared.destinations = await self._mqtt.get_destinations()
-                        pil_img = await self.hass.async_create_task(
-                            self.processor.run_async_process_valetudo_data(self._rrm_data)
-                        )
-                    elif self._rrm_data is None:
-                        pil_img = await self.hass.async_create_task(
-                            self.processor.run_async_process_valetudo_data(parsed_json)
-                        )
-                    else:
-                        # if no image was processed empty or last snapshot/frame
-                        pil_img = self.empty_if_no_data()
-                    # Converting the image obtained to bytes
-                    # Using openCV would reduce the CPU and memory usage.
-                    # On Py4 HA OS is not possible to install the openCV library.
-                    buffered = BytesIO()
-                    # backup the image
-                    if pil_img:
-                        self._last_image = pil_img
-                        self._image_w = pil_img.width
-                        self._image_h = pil_img.height
-                    else:
-                        pil_img = self.empty_if_no_data()
-                        self._last_image = None  # pil_img
-                        self._image_w = pil_img.width
-                        self._image_h = pil_img.height
-                    pil_img.save(buffered, format="PNG")
-                    bytes_data = buffered.getvalue()
-                    self._image = bytes_data
-                    # take a snapshot if we meet the conditions.
-                    if self._shared.snapshot_take:
-                        if self._shared.is_rand:
-                            await self.take_snapshot(self._rrm_data, pil_img)
-                        else:
-                            await self.take_snapshot(parsed_json, pil_img)
-                    # clean up
-                    del buffered, pil_img, bytes_data
-                    _LOGGER.debug(f"{self._shared.file_name}: Image update complete")
-                    processing_time = (datetime.now() - start_time).total_seconds()
-                    self._attr_frame_interval = max(0.1, processing_time)
-                    _LOGGER.debug(
-                        f"Adjusted {self._shared.file_name}: Frame interval: {self._attr_frame_interval}"
-                    )
-                else:
-                    _LOGGER.info(
-                        f"{self._shared.file_name}: Image not processed. Returning not updated image."
-                    )
-                    self._attr_frame_interval = 0.1
-                self.camera_image(self._image_w, self._image_h)
-                # HA supervised memory and CUP usage report.
-                self._cpu_percent = round(
-                    (
-                        (self._cpu_percent + proc.cpu_percent())
-                        / ProcInsp().psutil.cpu_count()
-                    )
-                    / 2,
-                    2,
-                )
-                memory_percent = round(
-                    (
-                        (proc.memory_info()[0] / 2.0**30)
-                        / (ProcInsp().psutil.virtual_memory().total / 2.0**30)
-                    )
-                    * 100,
-                    2,
-                )
-                _LOGGER.debug(
-                    f"{self._shared.file_name} System CPU usage stat: {self._cpu_percent}%"
-                )
-                _LOGGER.debug(
-                    f"{self._shared.file_name} Camera Memory usage in GB: "
-                    f"{round(proc.memory_info()[0]/2.**30, 2)}, "
-                    f"{memory_percent}% of Total."
-                )
-                self._cpu_percent = proc.cpu_percent() / ProcInsp().psutil.cpu_count()
-                self._processing = False
-                return self._image
+            if self._cpu_percent is not None and self._cpu_percent > 80:
+                # Skip this cycle and serve the cached frame while the CPU is busy.
+                self._processing = False
+                self._image = self._shared.last_image
+                return self._image
+            else:
+                self._processing = True
+                # if the vacuum is working, or it is the first image.
+                if (
+                    self._shared.vacuum_state == "cleaning"
+                    or self._shared.vacuum_state == "moving"
+                    or self._shared.vacuum_state == "returning"
+                ):
+                    # grab the image
+                    self._shared.image_grab = True
+                    self._shared.frame_number = self.processor.get_frame_number()
+                    # When the vacuum is cleaning, moving or returning,
+                    # do not take the automatic snapshot.
+                    self._shared.snapshot_take = False
+                    _LOGGER.info(
+                        f"{self._shared.file_name}: Camera image data update available: {process_data}"
+                    )
+                # Start timing the cycle to adjust the frame interval.
+                start_time = time.perf_counter()
+                pid = os.getpid()  # Start to log the CPU usage of this PID.
+                proc = ProcInsp().psutil.Process(pid)  # Get the process handle.
+                self._cpu_percent = round(
+                    (proc.cpu_percent() / ProcInsp().psutil.cpu_count()) / 2,
+                    2,
+                )
+                try:
+                    parsed_json = await self._mqtt.update_data(self._shared.image_grab)
+                    if parsed_json[1]:
+                        self._shared.is_rand = True
+                        self._rrm_data = parsed_json[0]
+                    else:
+                        parsed_json = parsed_json[0]
+                        self._rrm_data = None
+                    # The commented-out code below is for debugging purposes only.
+                    #########################################################
+                    # parsed_json = await self.load_test_json(
+                    #    "custom_components/valetudo_vacuum_camera/snapshots/test.json")
+                    ##########################################################
+                    self._vac_json_available = "Success"
+                except ValueError:
+                    self._vac_json_available = "Error"
+                else:
+                    # Just in case, let's check that the data is available
+                    pid = os.getpid()  # Start to log the CPU usage of this PID.
+                    proc = ProcInsp().psutil.Process(pid)  # Get the process handle.
+                    self._cpu_percent = round(
+                        (proc.cpu_percent() / ProcInsp().psutil.cpu_count()) / 2,
+                        2,
+                    )
+                    if parsed_json is not None:
+                        if self._rrm_data:
+                            self._shared.destinations = await self._mqtt.get_destinations()
+                            pil_img = await self.hass.async_create_task(
+                                self.processor.run_async_process_valetudo_data(self._rrm_data)
+                            )
+                        elif self._rrm_data is None:
+                            pil_img = await self.hass.async_create_task(
+                                self.processor.run_async_process_valetudo_data(parsed_json)
+                            )
+                        else:
+                            # If no image was processed, fall back to an empty image or the last snapshot/frame.
+                            pil_img = self.empty_if_no_data()
+                        # Convert the obtained image to bytes.
+                        # Using OpenCV would reduce the CPU and memory usage, but
+                        # on HA OS (e.g. on a Pi 4) the OpenCV library cannot be installed.
+                        buffered = BytesIO()
+                        # backup the image
+                        if pil_img:
+                            self._last_image = pil_img
+                            self._image_w = pil_img.width
+                            self._image_h = pil_img.height
+                        else:
+                            pil_img = self.empty_if_no_data()
+                            self._last_image = None
+                            self._image_w = pil_img.width
+                            self._image_h = pil_img.height
+                        pil_img.save(buffered, format="PNG")
+                        bytes_data = buffered.getvalue()
+                        self._image = bytes_data
+                        self._shared.last_image = bytes_data
+                        # Take a snapshot if the conditions are met.
+                        if self._shared.snapshot_take:
+                            if self._shared.is_rand:
+                                await self.take_snapshot(self._rrm_data, pil_img)
+                            else:
+                                await self.take_snapshot(parsed_json, pil_img)
+                        # clean up
+                        del buffered, pil_img, bytes_data
+                        _LOGGER.debug(f"{self._shared.file_name}: Image update complete")
+                        processing_time = round((time.perf_counter() - start_time), 3)
+                        # Adjust the frame interval to the processing time.
+                        self._attr_frame_interval = max(0.1, processing_time)
+                        _LOGGER.debug(
+                            f"Adjusted {self._shared.file_name}: Frame interval: {self._attr_frame_interval}"
+                        )
+                        gc.collect(2)
+                    else:
+                        _LOGGER.info(
+                            f"{self._shared.file_name}: Image not processed. Returning the previous image."
+                        )
+                        self._attr_frame_interval = 0.1
+                    self.camera_image(self._image_w, self._image_h)
+                    # HA Supervised memory and CPU usage report.
+                    self._cpu_percent = round(
+                        (
+                            (self._cpu_percent + proc.cpu_percent())
+                            / ProcInsp().psutil.cpu_count()
+                        )
+                        / 2,
+                        2,
+                    )
+                    memory_percent = round(
+                        (
+                            (proc.memory_info()[0] / 2.0**30)
+                            / (ProcInsp().psutil.virtual_memory().total / 2.0**30)
+                        )
+                        * 100,
+                        2,
+                    )
+                    _LOGGER.debug(
+                        f"{self._shared.file_name} System CPU usage stat: {self._cpu_percent}%"
+                    )
+                    _LOGGER.debug(
+                        f"{self._shared.file_name} Camera Memory usage in GB: "
+                        f"{round(proc.memory_info()[0]/2.**30, 2)}, "
+                        f"{memory_percent}% of Total."
+                    )
+                    self._cpu_percent = proc.cpu_percent() / ProcInsp().psutil.cpu_count()
+                    self._processing = False
+                    return self._image
diff --git a/custom_components/valetudo_vacuum_camera/camera_shared.py b/custom_components/valetudo_vacuum_camera/camera_shared.py
index 67865bc8..30dd4346 100644
--- a/custom_components/valetudo_vacuum_camera/camera_shared.py
+++ b/custom_components/valetudo_vacuum_camera/camera_shared.py
@@ -17,7 +17,7 @@ def __init__(self):
         self.destinations: list = []  # MQTT rand destinations
         self.is_rand: bool = False  # MQTT rand data
         self._new_mqtt_message = False  # New MQTT message
-        self._last_image = None  # Last image received
+        self.last_image = None  # Last image received
         self.image_size = None  # Image size
         self.image_grab = True  # Grab image from MQTT
         self.image_rotate: int = 0  # Rotate image
diff --git a/custom_components/valetudo_vacuum_camera/valetudo/MQTT/connector.py b/custom_components/valetudo_vacuum_camera/valetudo/MQTT/connector.py
index a7980719..1e43fb53 100644
--- a/custom_components/valetudo_vacuum_camera/valetudo/MQTT/connector.py
+++ b/custom_components/valetudo_vacuum_camera/valetudo/MQTT/connector.py
@@ -1,5 +1,5 @@
 """
-Version 1.5.2
+Version 1.5.7.2
 - Removed the PNG decode, the json is extracted from map-data instead of map-data hass.
 - Tested no influence on the camera performance.
 - Added gzip library used in Valetudo RE data compression.
@@ -23,7 +23,7 @@ class ValetudoConnector:
-    def __init__(self, mqtt_topic, hass, camera_shared: None):
+    def __init__(self, mqtt_topic, hass, camera_shared):
         self._hass = hass
         self._mqtt_topic = mqtt_topic
         self._unsubscribe_handlers = []
diff --git a/custom_components/valetudo_vacuum_camera/valetudo/hypfer/image_handler.py b/custom_components/valetudo_vacuum_camera/valetudo/hypfer/image_handler.py
index 23df2555..9a480ce4 100644
--- a/custom_components/valetudo_vacuum_camera/valetudo/hypfer/image_handler.py
+++ b/custom_components/valetudo_vacuum_camera/valetudo/hypfer/image_handler.py
@@ -9,14 +9,16 @@
 import hashlib
 import json
 import logging
+import gc

 from PIL import Image
 import numpy as np
 import svgwrite
 from svgwrite import shapes
+from psutil_home_assistant import PsutilWrapper as ProcInspector

 from custom_components.valetudo_vacuum_camera.types import Color
-from custom_components.valetudo_vacuum_camera.valetudo.hypfer.handler_pocessor import ImageHandlerProcessor
 from custom_components.valetudo_vacuum_camera.utils.colors_man import color_grey
 from custom_components.valetudo_vacuum_camera.utils.draweble import Drawable
 from custom_components.valetudo_vacuum_camera.utils.img_data import ImageData
@@ -24,6 +26,13 @@
 _LOGGER = logging.getLogger(__name__)

+# Custom exception raised when there is not enough free memory.
+class MemoryShortageError(Exception):
+    def __init__(self, message="Not enough memory available"):
+        self.message = message
+        super().__init__(self.message)
+
+
 # noinspection PyTypeChecker,PyUnboundLocalVariable,PyUnresolvedReferences
 class MapImageHandler(object):
     def __init__(self, shared_data):
@@ -52,7 +61,6 @@ def __init__(self, shared_data):
         self.trim_left = None  # memory stored trims calculated once.
         self.trim_right = None  # memory stored trims calculated once.
         self.trim_up = None  # memory stored trims calculated once.
-        self._processor = ImageHandlerProcessor(self.shared)  # imported Camera Processor Module.
     async def async_auto_crop_and_trim_array(
         self,
@@ -64,95 +72,99 @@
         """
         Automatically crops and trims a numpy array and returns the processed image.
         """
-        if not self.auto_crop:
-            _LOGGER.debug(
-                f"{self.shared.file_name}: Image original size ({image_array.shape[1]}, {image_array.shape[0]})."
-            )
-            # Find the coordinates of the first occurrence of a non-background color
-            nonzero_coords = np.column_stack(
-                np.where(image_array != list(detect_colour))
-            )
-            # Calculate the crop box based on the first and last occurrences
-            min_y, min_x, dummy = np.min(nonzero_coords, axis=0)
-            max_y, max_x, dummy = np.max(nonzero_coords, axis=0)
-            del dummy, nonzero_coords
-            _LOGGER.debug(
-                "{}: Found crop max and min values (y,x) ({}, {}) ({},{})...".format(
-                    self.shared.file_name, int(max_y), int(max_x), int(min_y), int(min_x)
-                )
-            )
-            # Calculate and store the trims coordinates with margins
-            self.trim_left = int(min_x) - margin_size
-            self.trim_up = int(min_y) - margin_size
-            self.trim_right = int(max_x) + margin_size
-            self.trim_down = int(max_y) + margin_size
-            del min_y, min_x, max_x, max_y
-            _LOGGER.debug(
-                "{}: Calculated trims coordinates right {}, bottom {}, left {}, up {}.".format(
-                    self.shared.file_name, self.trim_right, self.trim_down, self.trim_left, self.trim_up
-                )
-            )
-            # Calculate the dimensions after trimming using min/max values
-            trimmed_width = max(0, self.trim_right - self.trim_left)
-            trimmed_height = max(0, self.trim_down - self.trim_up)
-            _LOGGER.debug(
-                "{}: Calculated trimmed image width {} and height {}".format(
-                    self.shared.file_name, trimmed_width, trimmed_height
-                )
-            )
-            # Test if the trims are okay or not
-            if trimmed_height <= margin_size or trimmed_width <= margin_size:
-                _LOGGER.debug(f"{self.shared.file_name}: Background colour not detected at rotation {rotate}.")
-                pos_0 = 0
-                self.crop_area = (
-                    pos_0,
-                    pos_0,
-                    image_array.shape[1],
-                    image_array.shape[0],
-                )
-                self.img_size = (image_array.shape[1], image_array.shape[0])
-                del trimmed_width, trimmed_height
-                return image_array
-
-        # Store Crop area of the original image_array we will use from the next frame.
-        self.auto_crop = (
-            self.trim_left,
-            self.trim_up,
-            self.trim_right,
-            self.trim_down,
-        )
-        # Apply the auto-calculated trims to the rotated image
-        trimmed = image_array[
-            self.auto_crop[1] : self.auto_crop[3], self.auto_crop[0] : self.auto_crop[2]
-        ]
-        del image_array
-        # Rotate the cropped image based on the given angle
-        if rotate == 90:
-            rotated = np.rot90(trimmed, 1)
-            self.crop_area = (
-                self.trim_left,
-                self.trim_up,
-                self.trim_right,
-                self.trim_down,
-            )
-        elif rotate == 180:
-            rotated = np.rot90(trimmed, 2)
-            self.crop_area = self.auto_crop
-        elif rotate == 270:
-            rotated = np.rot90(trimmed, 3)
-            self.crop_area = (
-                self.trim_left,
-                self.trim_up,
-                self.trim_right,
-                self.trim_down,
-            )
-        else:
-            rotated = trimmed
-            self.crop_area = self.auto_crop
-        del trimmed
-        _LOGGER.debug(f"{self.shared.file_name}: Auto Crop and Trim Box data: {self.crop_area}")
-        self.crop_img_size = (rotated.shape[1], rotated.shape[0])
-        _LOGGER.debug(f"{self.shared.file_name}: Auto Crop and Trim image size: {self.crop_img_size}")
+        try:
+            if not self.auto_crop:
+                _LOGGER.debug(
+                    f"{self.shared.file_name}: Image original size ({image_array.shape[1]}, {image_array.shape[0]})."
+                )
+                # Find the coordinates of all pixels that differ from the background color.
+                nonzero_coords = np.column_stack(
+                    np.where(image_array != list(detect_colour))
+                )
+                # Calculate the crop box from the min/max coordinates.
+                min_y, min_x, dummy = np.min(nonzero_coords, axis=0)
+                max_y, max_x, dummy = np.max(nonzero_coords, axis=0)
+                del dummy, nonzero_coords
+                _LOGGER.debug(
+                    "{}: Found crop max and min values (y,x) ({}, {}) ({},{})...".format(
+                        self.shared.file_name, int(max_y), int(max_x), int(min_y), int(min_x)
+                    )
+                )
+                # Calculate and store the trims coordinates with margins.
+                self.trim_left = int(min_x) - margin_size
+                self.trim_up = int(min_y) - margin_size
+                self.trim_right = int(max_x) + margin_size
+                self.trim_down = int(max_y) + margin_size
+                del min_y, min_x, max_x, max_y
+                _LOGGER.debug(
+                    "{}: Calculated trims coordinates right {}, bottom {}, left {}, up {}.".format(
+                        self.shared.file_name, self.trim_right, self.trim_down, self.trim_left, self.trim_up
+                    )
+                )
+                # Calculate the dimensions after trimming using min/max values.
+                trimmed_width = max(0, self.trim_right - self.trim_left)
+                trimmed_height = max(0, self.trim_down - self.trim_up)
+                _LOGGER.debug(
+                    "{}: Calculated trimmed image width {} and height {}".format(
+                        self.shared.file_name, trimmed_width, trimmed_height
+                    )
+                )
+                # Check that the calculated trims are plausible.
+                if trimmed_height <= margin_size or trimmed_width <= margin_size:
+                    _LOGGER.debug(f"{self.shared.file_name}: Background colour not detected at rotation {rotate}.")
+                    pos_0 = 0
+                    self.crop_area = (
+                        pos_0,
+                        pos_0,
+                        image_array.shape[1],
+                        image_array.shape[0],
+                    )
+                    self.img_size = (image_array.shape[1], image_array.shape[0])
+                    del trimmed_width, trimmed_height
+                    return image_array
+
+            # Store the crop area of the original image_array; it is reused from the next frame on.
+            self.auto_crop = (
+                self.trim_left,
+                self.trim_up,
+                self.trim_right,
+                self.trim_down,
+            )
+            # Apply the auto-calculated trims to the rotated image.
+            trimmed = image_array[
+                self.auto_crop[1] : self.auto_crop[3], self.auto_crop[0] : self.auto_crop[2]
+            ]
+            del image_array
+            # Rotate the cropped image based on the given angle.
+            if rotate == 90:
+                rotated = np.rot90(trimmed, 1)
+                self.crop_area = (
+                    self.trim_left,
+                    self.trim_up,
+                    self.trim_right,
+                    self.trim_down,
+                )
+            elif rotate == 180:
+                rotated = np.rot90(trimmed, 2)
+                self.crop_area = self.auto_crop
+            elif rotate == 270:
+                rotated = np.rot90(trimmed, 3)
+                self.crop_area = (
+                    self.trim_left,
+                    self.trim_up,
+                    self.trim_right,
+                    self.trim_down,
+                )
+            else:
+                rotated = trimmed
+                self.crop_area = self.auto_crop
+            del trimmed
+            _LOGGER.debug(f"{self.shared.file_name}: Auto Crop and Trim Box data: {self.crop_area}")
+            self.crop_img_size = (rotated.shape[1], rotated.shape[0])
+            _LOGGER.debug(f"{self.shared.file_name}: Auto Crop and Trim image size: {self.crop_img_size}")
+        except Exception as e:
+            _LOGGER.error(f"{self.shared.file_name}: Error {e} during auto crop and trim.", exc_info=True)
+            return None
         return rotated

     def extract_room_properties(self, json_data):
@@ -218,7 +230,6 @@ async def async_get_image_from_json(
         _LOGGER.info(f"{self.shared.file_name}: Composing the image for the camera.")
         # buffer json data
         self.json_data = m_json
-        # result_queue = Queue()

         if self.room_propriety and self.frame_number == 0:
             _LOGGER.info(f"{self.shared.file_name}: Supporting Rooms Cleaning!")
@@ -351,7 +362,7 @@
             layers, active = self.data.find_layers(m_json["layers"])
             new_frame_hash = await self.calculate_array_hash(layers, active)
             if self.frame_number == 0:
-                self.img_hash = new_frame_hash  # await self.calculate_array_hash(layers, active)
+                self.img_hash = new_frame_hash
                 # The code below draws the base layer that will be reused in the next frame.
                 _LOGGER.debug(f"{self.shared.file_name}: Layers to draw: {layers.keys()}")
                 _LOGGER.info(f"{self.shared.file_name}: Empty image with background color")
@@ -389,6 +400,7 @@
                 img_np_array = await self.draw.from_json_to_image(
                     img_np_array, pixels, pixel_size, room_color
                 )
+                gc.collect(2)
                 if room_id < 15:
                     room_id += 1
                 else:
@@ -447,74 +459,90 @@
             _LOGGER.info(f"{self.shared.file_name}: Completed base Layers")
             self.img_base_layer = await self.async_copy_array(img_np_array)
-            if self.shared.export_svg and self.frame_number == 0:
-                await self.async_numpy_array_to_svg(
-                    base_layer=self.img_base_layer,
-                    colours_list=rooms_list,
-                    color_background=color_background
-                )
+            # if self.shared.export_svg and self.frame_number == 0:
+            #     await self.async_numpy_array_to_svg(
+            #         base_layer=self.img_base_layer,
+            #         colours_list=rooms_list,
+            #         color_background=color_background
+            #     )
         self.frame_number += 1
         if (self.frame_number > 1024) or (new_frame_hash != self.img_hash):
             self.frame_number = 0
-
+            gc.collect(2)
         _LOGGER.debug(f"{self.shared.file_name}: Frame number {self.frame_number}")
+        try:
+            self.check_memory_with_margin(self.img_base_layer)
+        except MemoryShortageError as e:
+            _LOGGER.error(f"Memory shortage error: {e}")
+            return None
         img_np_array = await self.async_copy_array(self.img_base_layer)
         # Everything below is drawn on every frame.
         # If there is a zone clean we draw it now.
-        if zone_clean:
-            try:
-                zones_clean = zone_clean.get("active_zone")
-            except KeyError:
-                zones_clean = None
-            if zones_clean:
-                _LOGGER.info(f"{self.shared.file_name}: Drawing Zone Clean.")
-                img_np_array = await self.draw.zones(
-                    img_np_array, zones_clean, color_zone_clean
-                )
-        if go_to:
-            img_np_array = await self.draw.go_to_flag(
-                img_np_array,
-                (go_to[0]["points"][0], go_to[0]["points"][1]),
-                self.img_rotate,
-                color_go_to,
-            )
-        if predicted_pat2:
-            img_np_array = await self.draw.lines(
-                img_np_array, predicted_pat2, 2, color_grey
-            )
-        # draw path
-        if path_pixels:
-            for path in path_pixels:
-                # Get the points from the current path and extend the all_path_points list
-                points = path.get("points", [])
-                sublists = self.data.sublist(points, 2)
-                path_pixel2 = self.data.sublist_join(sublists, 2)
-                img_np_array = await self.draw.lines(
-                    img_np_array, path_pixel2, 5, color_move
-                )
-        if self.shared.vacuum_state == "docked":
-            robot_position_angle = robot_position_angle - 180
-        if robot_pos:
-            img_np_array = await self.draw.robot(
-                layers=img_np_array,
-                x=robot_position[0],
-                y=robot_position[1],
-                angle=robot_position_angle,
-                fill=color_robot,
-                log=self.shared.file_name,
-            )
-        _LOGGER.debug(
-            f"{self.shared.file_name}: Auto cropping the image with rotation: {int(self.shared.image_rotate)}"
-        )
-        img_np_array = await self.async_auto_crop_and_trim_array(
-            img_np_array,
-            color_background,
-            int(self.shared.margins),
-            int(self.shared.image_rotate),
-        )
+        try:
+            if zone_clean:
+                # dict.get() never raises KeyError, so no try/except is needed here.
+                zones_clean = zone_clean.get("active_zone")
+                if zones_clean:
+                    _LOGGER.info(f"{self.shared.file_name}: Drawing Zone Clean.")
+                    img_np_array = await self.draw.zones(
+                        img_np_array, zones_clean, color_zone_clean
+                    )
+            if go_to:
+                img_np_array = await self.draw.go_to_flag(
+                    img_np_array,
+                    (go_to[0]["points"][0], go_to[0]["points"][1]),
+                    self.img_rotate,
+                    color_go_to,
+                )
+            if predicted_pat2:
+                img_np_array = await self.draw.lines(
+                    img_np_array, predicted_pat2, 2, color_grey
+                )
+            # draw path
+            if path_pixels:
+                for path in path_pixels:
+                    # Split the flat points list of the current path into (x, y) pairs for drawing.
+                    points = path.get("points", [])
+                    sublists = self.data.sublist(points, 2)
+                    path_pixel2 = self.data.sublist_join(sublists, 2)
+                    img_np_array = await self.draw.lines(
+                        img_np_array, path_pixel2, 5, color_move
+                    )
+            if self.shared.vacuum_state == "docked":
+                robot_position_angle = robot_position_angle - 180
+            if robot_pos:
+                img_np_array = await self.draw.robot(
+                    layers=img_np_array,
+                    x=robot_position[0],
+                    y=robot_position[1],
+                    angle=robot_position_angle,
+                    fill=color_robot,
+                    log=self.shared.file_name,
+                )
+            _LOGGER.debug(
+                f"{self.shared.file_name}: Auto cropping the image with rotation:"
+                f" {int(self.shared.image_rotate)}"
+            )
+            img_np_array = await self.async_auto_crop_and_trim_array(
+                img_np_array,
+                color_background,
+                int(self.shared.margins),
+                int(self.shared.image_rotate),
+            )
+        except Exception as e:
+            _LOGGER.error(
+                f"{self.shared.file_name}: Error while drawing the image: {e}",
+                exc_info=True,
+            )
+            return None
+        if img_np_array is None:
+            return None
+        # Convert the numpy array to a PIL image.
         pil_img = Image.fromarray(img_np_array, mode="RGBA")
         del img_np_array
         return pil_img
@@ -539,6 +567,25 @@ def get_img_size(self):
         return self.img_size

     def get_json_id(self):
         return self.json_id

+    # Calculate the memory footprint of a NumPy array in MB.
+    def calculate_memory_usage(self, array):
+        element_size_bytes = array.itemsize
+        total_memory_bytes = array.size * element_size_bytes
+        total_memory_mb = total_memory_bytes / (1024 * 1024)
+        _LOGGER.debug(f"{self.shared.file_name}: Memory usage of the array: {total_memory_mb} MB")
+        return total_memory_mb
+
+    # Check that `margin` copies of the array still fit in the available memory.
+    def check_memory_with_margin(self, array, margin=3):
+        array_memory_mb = self.calculate_memory_usage(array)
+        margin_memory_mb = margin * array_memory_mb
+        available_memory_mb = ProcInspector().psutil.virtual_memory().available / (1024 * 1024)
+        _LOGGER.debug(f"{self.shared.file_name}: Available memory: {available_memory_mb} MB")
+        if available_memory_mb < margin_memory_mb:
+            raise MemoryShortageError(f"Not enough memory available (Margin: {margin}x)")
+
     async def async_get_rooms_attributes(self):
         if self.room_propriety:
             return self.room_propriety
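The memory guard added above multiplies the base layer's footprint by a safety margin before the frame copy is allocated. A minimal, self-contained sketch of the same idea; it uses plain psutil directly instead of the component's PsutilWrapper, which is an assumption made only to keep the example runnable outside Home Assistant:

# Standalone sketch of the memory-margin guard (plain psutil stands in for PsutilWrapper).
import numpy as np
import psutil


class MemoryShortageError(Exception):
    """Raised when free memory cannot hold `margin` copies of the frame."""


def check_memory_with_margin(array: np.ndarray, margin: int = 3) -> None:
    # Footprint of the array and currently available RAM, both in MB.
    array_memory_mb = array.size * array.itemsize / (1024 * 1024)
    available_memory_mb = psutil.virtual_memory().available / (1024 * 1024)
    if available_memory_mb < margin * array_memory_mb:
        raise MemoryShortageError(
            f"need ~{margin * array_memory_mb:.0f} MB, have {available_memory_mb:.0f} MB"
        )


frame = np.zeros((2000, 2000, 4), dtype=np.uint8)  # ~15 MB RGBA base layer
try:
    check_memory_with_margin(frame)
    working_copy = frame.copy()  # safe to copy: 3x headroom confirmed above
except MemoryShortageError as err:
    print(f"Skipping frame composition: {err}")

Raising instead of returning a flag lets the caller bail out of the whole composition path in one place, which is exactly how async_get_image_from_json uses it in the patch.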
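The CPU gate at the top of async_update follows the same pattern and can also be exercised in isolation. In this sketch only the 80% threshold and the cached-image behaviour come from the patch; the ThrottledCamera class and method names are illustrative:

# Standalone sketch of the CPU-load gate (class and method names are illustrative).
import psutil


class ThrottledCamera:
    def __init__(self):
        self._cpu_percent = None  # normalised load measured on the previous cycle
        self._last_image = b""    # cached PNG bytes served while throttled

    def render_frame(self) -> bytes:
        return b""  # placeholder for the expensive PIL/numpy work

    def update(self) -> bytes:
        # Serve the cached frame when the previous cycle measured high load.
        if self._cpu_percent is not None and self._cpu_percent > 80:
            return self._last_image
        self._last_image = self.render_frame()
        # Normalise process CPU time over all cores for the next cycle's check.
        self._cpu_percent = psutil.Process().cpu_percent() / psutil.cpu_count()
        return self._last_image

Measuring after rendering and checking before the next render means the gate always lags one cycle, which matches the patch: a busy cycle throttles the following one rather than the current one.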