diff --git a/custom_components/mqtt_vacuum_camera/__init__.py b/custom_components/mqtt_vacuum_camera/__init__.py
index 5a4ccd90..47fb6a0b 100755
--- a/custom_components/mqtt_vacuum_camera/__init__.py
+++ b/custom_components/mqtt_vacuum_camera/__init__.py
@@ -13,12 +13,9 @@
     Platform,
 )
 from homeassistant.core import ServiceCall
-from homeassistant.exceptions import ConfigEntryNotReady
+from homeassistant.exceptions import ConfigEntryNotReady, ServiceValidationError
 from homeassistant.helpers.reload import async_register_admin_service
 from homeassistant.helpers.storage import STORAGE_DIR
-from homeassistant.exceptions import (
-    ServiceValidationError,
-)
 
 from .common import (
     get_device_info,
diff --git a/custom_components/mqtt_vacuum_camera/camera_processing.py b/custom_components/mqtt_vacuum_camera/camera_processing.py
index 5dc08a47..b27cb3fb 100755
--- a/custom_components/mqtt_vacuum_camera/camera_processing.py
+++ b/custom_components/mqtt_vacuum_camera/camera_processing.py
@@ -31,7 +31,7 @@ class CameraProcessor:
     def __init__(self, hass, camera_shared):
         self.hass = hass
         self._map_handler = MapImageHandler(camera_shared, hass)
-        self._re_handler = ReImageHandler(camera_shared)
+        self._re_handler = ReImageHandler(camera_shared, hass)
         self._shared = camera_shared
         self._file_name = self._shared.file_name
         self._translations_path = self.hass.config.path(
diff --git a/custom_components/mqtt_vacuum_camera/manifest.json b/custom_components/mqtt_vacuum_camera/manifest.json
index 29779795..a1dc38b0 100755
--- a/custom_components/mqtt_vacuum_camera/manifest.json
+++ b/custom_components/mqtt_vacuum_camera/manifest.json
@@ -7,6 +7,6 @@
   "documentation": "https://github.com/sca075/mqtt_vacuum_camera",
   "iot_class": "local_polling",
   "issue_tracker": "https://github.com/sca075/mqtt_vacuum_camera/issues",
-  "requirements": ["pillow", "numpy"],
-  "version": "2024.08.0b3"
+  "requirements": ["pillow>=10.3.0,<11.0.0", "numpy"],
+  "version": "2024.08.0"
 }
diff --git a/custom_components/mqtt_vacuum_camera/utils/files_operations.py b/custom_components/mqtt_vacuum_camera/utils/files_operations.py
index 31e576e6..fb593faa 100755
--- a/custom_components/mqtt_vacuum_camera/utils/files_operations.py
+++ b/custom_components/mqtt_vacuum_camera/utils/files_operations.py
@@ -18,10 +18,8 @@
 from typing import Any, Optional
 
 from homeassistant.core import HomeAssistant
+from homeassistant.exceptions import ServiceValidationError
 from homeassistant.helpers.storage import STORAGE_DIR
-from homeassistant.exceptions import (
-    ServiceValidationError,
-)
 
 from custom_components.mqtt_vacuum_camera.const import CAMERA_STORAGE
 from custom_components.mqtt_vacuum_camera.types import RoomStore, UserLanguageStore
diff --git a/custom_components/mqtt_vacuum_camera/utils/img_data.py b/custom_components/mqtt_vacuum_camera/utils/img_data.py
index fefe6348..c506dabd 100755
--- a/custom_components/mqtt_vacuum_camera/utils/img_data.py
+++ b/custom_components/mqtt_vacuum_camera/utils/img_data.py
@@ -487,8 +487,8 @@ async def async_get_rrm_segments(
             )
             if out_lines:
                 room_coords = await ImageData.async_get_rooms_coordinates(
-                    pixels=segments[count_seg],
-                    rand=True)
+                    pixels=segments[count_seg], rand=True
+                )
                 outlines.append(room_coords)
             count_seg += 1
         if count_seg > 0:
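Note: the `ReImageHandler(camera_shared, hass)` change above exists so the Rand256 handler can resolve a per-vacuum path under Home Assistant's storage directory, which is then used for the auto-crop cache further down. A minimal sketch of that path resolution, assuming a stand-in value for the integration's `CAMERA_STORAGE` constant:

```python
from homeassistant.core import HomeAssistant
from homeassistant.helpers.storage import STORAGE_DIR

# Assumed stand-in for custom_components.mqtt_vacuum_camera.const.CAMERA_STORAGE.
CAMERA_STORAGE = "valetudo_camera"


def auto_crop_path(hass: HomeAssistant, file_name: str) -> str:
    """Resolve where the per-vacuum auto-crop data would be stored."""
    # e.g. <config>/.storage/valetudo_camera/auto_crop_<file_name>.json
    return hass.config.path(STORAGE_DIR, CAMERA_STORAGE, f"auto_crop_{file_name}.json")
```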
diff --git a/custom_components/mqtt_vacuum_camera/valetudo/MQTT/connector.py b/custom_components/mqtt_vacuum_camera/valetudo/MQTT/connector.py
index a849acb0..462cb0e1 100755
--- a/custom_components/mqtt_vacuum_camera/valetudo/MQTT/connector.py
+++ b/custom_components/mqtt_vacuum_camera/valetudo/MQTT/connector.py
@@ -170,7 +170,10 @@ async def is_disconnect_vacuum(self) -> None:
         Disconnect the vacuum detected.
         Generate a Warning message if the vacuum is disconnected.
         """
-        if self._mqtt_vac_connect_state == "disconnected" or self._mqtt_vac_connect_state == "lost":
+        if (
+            self._mqtt_vac_connect_state == "disconnected"
+            or self._mqtt_vac_connect_state == "lost"
+        ):
             _LOGGER.debug(
                 f"{self._mqtt_topic}: Vacuum Disconnected from MQTT, waiting for connection."
             )
@@ -248,8 +251,10 @@ async def rand256_handle_destinations(self, msg) -> None:
         self._payload = msg.payload
         tmp_data = await self.async_decode_mqtt_payload(msg)
         self._rrm_destinations = tmp_data
-        if 'rooms' in tmp_data:
-            rooms_data = {str(room['id']): room['name'].strip('#') for room in tmp_data['rooms']}
+        if "rooms" in tmp_data:
+            rooms_data = {
+                str(room["id"]): room["name"].strip("#") for room in tmp_data["rooms"]
+            }
             await RoomStore().async_set_rooms_data(self._file_name, rooms_data)
         _LOGGER.info(
             f"{self._file_name}: Received vacuum destinations: {self._rrm_destinations}"
         )
@@ -257,32 +262,35 @@
     async def rrm_handle_active_segments(self, msg) -> None:
         """
-        Handle new MQTT messages.
+        Handle new MQTT messages regarding active segments.
         /active_segments is for Rand256.
-        @param msg: MQTT message
-        { "command": "segmented_cleanup", "segment_ids": [2], "repeats": 1, "afterCleaning": "Base" }
         """
-        command_status = json.loads(msg.payload)
+        command_status = await self.async_decode_mqtt_payload(msg)
+        _LOGGER.debug(f"Command Status: {command_status}")
         command = command_status.get("command", None)
-        if command == "segmented_cleanup" and self._rrm_destinations:
+        if command == "segmented_cleanup":
             segment_ids = command_status.get("segment_ids", [])
-            # Parse rooms JSON from _rrm_destinations
-            rooms_json = json.loads(self._rrm_destinations)
-            rooms = rooms_json.get("rooms", [])
-            # Create a mapping of room IDs to their positions in rooms list
-            room_ids = {room["id"]: idx for idx, room in enumerate(rooms, start=1)}
-            # Initialize rrm_active_segments with zeros
-            self._rrm_active_segments = [0] * len(rooms)
+            _LOGGER.debug(f"Segment IDs: {segment_ids}")
+
+            # Retrieve room data from RoomStore
+            rooms_data = await RoomStore().async_get_rooms_data(self._file_name)
+            rrm_active_segments = [0] * len(
+                rooms_data
+            )  # Initialize based on the number of rooms
             for segment_id in segment_ids:
-                if segment_id in room_ids:
-                    room_idx = room_ids[segment_id] - 1  # Index start from 0
-                    self._rrm_active_segments[room_idx] = 1
-            self._shared.rand256_active_zone = self._rrm_active_segments
-            _LOGGER.debug(
-                f"Active Segments of {self._file_name}: {self._rrm_active_segments}"
-            )
+                room_name = rooms_data.get(str(segment_id))
+                if room_name:
+                    # Map the room ID to its positional index in the stored room order
+                    room_idx = list(rooms_data.keys()).index(str(segment_id))
+                    rrm_active_segments[room_idx] = 1
+
+            self._shared.rand256_active_zone = rrm_active_segments
+            _LOGGER.debug(f"Updated Active Segments: {rrm_active_segments}")
+        else:
+            self._shared.rand256_active_zone = []
+            _LOGGER.debug("No valid command or room data; segments cleared.")
 
     @callback
     async def async_message_received(self, msg) -> None:
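Note: the heart of the `rrm_handle_active_segments` rewrite above is mapping the requested `segment_ids` onto per-room 0/1 flags using the room order persisted in `RoomStore`. A standalone sketch of that mapping, assuming `rooms_data` has the `{segment_id: room_name}` shape written by `rand256_handle_destinations` (the helper name below is illustrative only):

```python
def active_segments(rooms_data: dict[str, str], segment_ids: list[int]) -> list[int]:
    """Return one 0/1 flag per stored room, set to 1 when that room is requested."""
    room_index = {room_id: idx for idx, room_id in enumerate(rooms_data)}
    flags = [0] * len(rooms_data)
    for segment_id in segment_ids:
        idx = room_index.get(str(segment_id))
        if idx is not None:
            flags[idx] = 1
    return flags


# Example: three known rooms, cleanup requested for room 17 only.
print(active_segments({"16": "Kitchen", "17": "Hall", "18": "Bedroom"}, [17]))
# Expected output: [0, 1, 0]
```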
diff --git a/custom_components/mqtt_vacuum_camera/valetudo/hypfer/image_handler.py b/custom_components/mqtt_vacuum_camera/valetudo/hypfer/image_handler.py
index 91fadca7..6012434f 100755
--- a/custom_components/mqtt_vacuum_camera/valetudo/hypfer/image_handler.py
+++ b/custom_components/mqtt_vacuum_camera/valetudo/hypfer/image_handler.py
@@ -270,8 +270,8 @@ async def async_extract_room_properties(self, json_data):
                 compressed_pixels = layer.get("compressedPixels", [])
                 pixels = self.data.sublist(compressed_pixels, 3)
                 # Calculate x and y min/max from compressed pixels
-                x_min, y_min, x_max, y_max = await self.data.async_get_rooms_coordinates(
-                    pixels, pixel_size
+                x_min, y_min, x_max, y_max = (
+                    await self.data.async_get_rooms_coordinates(pixels, pixel_size)
                 )
                 corners = [
                     (x_min, y_min),
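Note: the `corners` list built right after this call is the axis-aligned bounding box of the room derived from the returned min/max values. A standalone sketch of the same idea, assuming the compressed pixels are (x, y, count) run-length triplets as in Valetudo's map JSON; this is an illustration, not the integration's exact implementation:

```python
def room_bbox_corners(
    pixels: list[tuple[int, int, int]], pixel_size: int
) -> list[tuple[int, int]]:
    """Bounding-box corners for a room given (x, y, count) pixel runs."""
    x_min = min(x for x, _, _ in pixels) * pixel_size
    x_max = max(x + count for x, _, count in pixels) * pixel_size
    y_min = min(y for _, y, _ in pixels) * pixel_size
    y_max = max(y for _, y, _ in pixels) * pixel_size
    return [(x_min, y_min), (x_max, y_min), (x_max, y_max), (x_min, y_max)]


# Example: two short runs of 5 cm pixels.
print(room_bbox_corners([(10, 20, 4), (12, 21, 6)], 5))
# [(50, 100), (90, 100), (90, 105), (50, 105)]
```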
diff --git a/custom_components/mqtt_vacuum_camera/valetudo/rand256/image_handler.py b/custom_components/mqtt_vacuum_camera/valetudo/rand256/image_handler.py
index 0ad29059..63648eed 100755
--- a/custom_components/mqtt_vacuum_camera/valetudo/rand256/image_handler.py
+++ b/custom_components/mqtt_vacuum_camera/valetudo/rand256/image_handler.py
@@ -8,11 +8,15 @@
 from __future__ import annotations
 
 import logging
+import os.path
 import uuid
 
 from PIL import Image, ImageOps
+from homeassistant.core import HomeAssistant
+from homeassistant.helpers.storage import STORAGE_DIR
 import numpy as np
 
+from custom_components.mqtt_vacuum_camera.const import CAMERA_STORAGE
 from custom_components.mqtt_vacuum_camera.types import (
     Color,
     JsonType,
@@ -20,10 +24,19 @@
     PilPNG,
     RobotPosition,
     RoomsProperties,
+    TrimCropData,
 )
 from custom_components.mqtt_vacuum_camera.utils.colors_man import color_grey
 from custom_components.mqtt_vacuum_camera.utils.drawable import Drawable
+from custom_components.mqtt_vacuum_camera.utils.files_operations import (
+    async_load_file,
+    async_write_json_to_disk,
+)
 from custom_components.mqtt_vacuum_camera.utils.img_data import ImageData
+from custom_components.mqtt_vacuum_camera.valetudo.hypfer.handler_utils import (
+    ImageUtils as ImUtils,
+    TrimError,
+)
 
 _LOGGER = logging.getLogger(__name__)
 
@@ -34,7 +47,8 @@ class ReImageHandler(object):
     """
    Image Handler for Valetudo Re Vacuums.
     """
-    def __init__(self, camera_shared):
+    def __init__(self, camera_shared, hass: HomeAssistant):
+        self.hass = hass
         self.auto_crop = None  # Auto crop flag
         self.segment_data = None  # Segment data
         self.outlines = None  # Outlines data
@@ -64,121 +78,178 @@ def __init__(self, camera_shared):
         self.trim_up = None  # Trim up
         self.zooming = False  # Zooming flag
         self.file_name = self.shared.file_name  # File name
+        self.path_to_data = self.hass.config.path(
+            STORAGE_DIR, CAMERA_STORAGE, f"auto_crop_{self.file_name}.json"
+        )  # path to the data
         self.offset_x = 0  # offset x for the aspect ratio.
         self.offset_y = 0  # offset y for the aspect ratio.
         self.offset_top = self.shared.offset_top  # offset top
         self.offset_bottom = self.shared.offset_down  # offset bottom
         self.offset_left = self.shared.offset_left  # offset left
         self.offset_right = self.shared.offset_right  # offset right
+        self.imu = ImUtils(self)
+
+    def check_trim(
+        self, trimmed_height, trimmed_width, margin_size, image_array, file_name, rotate
+    ):
+        """
+        Check if the trimming is okay.
+        """
+        if trimmed_height <= margin_size or trimmed_width <= margin_size:
+            self.crop_area = [0, 0, image_array.shape[1], image_array.shape[0]]
+            self.img_size = (image_array.shape[1], image_array.shape[0])
+            raise TrimError(
+                f"{file_name}: Trimming failed at rotation {rotate}.",
+                image_array,
+            )
+
+    def _calculate_trimmed_dimensions(self):
+        """Calculate and update the dimensions after trimming."""
+        trimmed_width = max(
+            0,
+            (
+                (self.trim_right - self.offset_right)
+                - (self.trim_left + self.offset_left)
+            ),
+        )
+        trimmed_height = max(
+            0,
+            ((self.trim_down - self.offset_bottom) - (self.trim_up + self.offset_top)),
+        )
+
+        # Ensure shared reference dimensions are updated
+        if hasattr(self.shared, "image_ref_height") and hasattr(
+            self.shared, "image_ref_width"
+        ):
+            self.shared.image_ref_height = trimmed_height
+            self.shared.image_ref_width = trimmed_width
+        else:
+            _LOGGER.warning(
+                "Shared attributes for image dimensions are not initialized."
+            )
+        return trimmed_width, trimmed_height
+
+    async def _async_auto_crop_data(self):
+        """Load the auto crop data from the disk."""
+        try:
+            if os.path.exists(self.path_to_data) and self.auto_crop is None:
+                temp_data = await async_load_file(self.path_to_data, True)
+                if temp_data is not None:
+                    trims_data = TrimCropData.from_dict(dict(temp_data)).to_list()
+                    self.trim_left, self.trim_up, self.trim_right, self.trim_down = (
+                        trims_data
+                    )
+
+                    # Calculate the dimensions after trimming using min/max values
+                    _, _ = self._calculate_trimmed_dimensions()
+                    return trims_data
+                else:
+                    _LOGGER.error("Trim data file is empty.")
+                    return None
+        except Exception as e:
+            _LOGGER.error(f"Failed to load trim data due to an error: {e}")
+            return None
+
+    def auto_crop_offset(self):
+        """Calculate the crop offset."""
+        if self.auto_crop:
+            self.auto_crop[0] += self.offset_left
+            self.auto_crop[1] += self.offset_top
+            self.auto_crop[2] -= self.offset_right
+            self.auto_crop[3] -= self.offset_bottom
+        else:
+            _LOGGER.warning(
+                "Auto crop data is not available. A timeout warning may occur!"
+            )
+            self.auto_crop = None
+
+    async def _init_auto_crop(self):
+        if self.auto_crop is None:
+            _LOGGER.debug(f"{self.file_name}: Trying to load crop data from disk")
+            self.auto_crop = await self._async_auto_crop_data()
+            self.auto_crop_offset()
+        return self.auto_crop
+
+    async def _async_save_auto_crop_data(self):
+        """Save the auto crop data to the disk."""
+        try:
+            if not os.path.exists(self.path_to_data):
+                _LOGGER.debug("Writing crop data to disk")
+                data = TrimCropData(
+                    self.trim_left, self.trim_up, self.trim_right, self.trim_down
+                ).to_dict()
+                await async_write_json_to_disk(self.path_to_data, data)
+        except Exception as e:
+            _LOGGER.error(f"Failed to save trim data due to an error: {e}")
 
-    async def auto_crop_and_trim_array(
+    async def async_auto_trim_and_zoom_image(
         self,
         image_array: NumpyArray,
-        detect_colour: Color,
+        detect_colour: Color = color_grey,
         margin_size: int = 0,
         rotate: int = 0,
-    ) -> NumpyArray:
+        zoom: bool = False,
+    ):
         """
         Automatically crops and trims a numpy array and returns the processed image.
         """
-        if not self.auto_crop:
-            _LOGGER.debug(
-                f"Image original size: {image_array.shape[1]}, {image_array.shape[0]}"
-            )
-            # Find the coordinates of the first occurrence of a non-background color
-            nonzero_coords = np.column_stack(
-                np.where(image_array != list(detect_colour))
-            )
-            # Calculate the crop box based on the first and last occurrences
-            min_y, min_x, dummy = np.min(nonzero_coords, axis=0)
-            max_y, max_x, dummy = np.max(nonzero_coords, axis=0)
-            del dummy, nonzero_coords
-            _LOGGER.debug(
-                "Found crop max and min values (y,x) ({}, {}) ({},{})...".format(
-                    int(max_y), int(max_x), int(min_y), int(min_x)
-                )
-            )
-            # Calculate and store the trims coordinates with margins
-            self.trim_left = int(min_x) + self.offset_left - margin_size
-            self.trim_up = int(min_y) + self.offset_top - margin_size
-            self.trim_right = int(max_x) - self.offset_right + margin_size
-            self.trim_down = int(max_y) - self.offset_bottom + margin_size
-            del min_y, min_x, max_x, max_y
-            _LOGGER.debug(
-                "Calculated trims coordinates right {}, bottom {}, left {}, up {} ".format(
-                    self.trim_right, self.trim_down, self.trim_left, self.trim_up
+        try:
+            await self._init_auto_crop()
+            if self.auto_crop is None:
+                _LOGGER.debug(f"{self.file_name}: Calculating auto trim box")
+                # Find the coordinates of the first occurrence of a non-background color
+                min_y, min_x, max_x, max_y = await self.imu.async_image_margins(
+                    image_array, detect_colour
                 )
+                # Calculate and store the trims coordinates with margins
+                self.trim_left = int(min_x) - margin_size
+                self.trim_up = int(min_y) - margin_size
+                self.trim_right = int(max_x) + margin_size
+                self.trim_down = int(max_y) + margin_size
+                del min_y, min_x, max_x, max_y
+
+                # Calculate the dimensions after trimming using min/max values
+                trimmed_width, trimmed_height = self._calculate_trimmed_dimensions()
+
+                # Test if the trims are okay or not
+                try:
+                    self.check_trim(
+                        trimmed_height,
+                        trimmed_width,
+                        margin_size,
+                        image_array,
+                        self.file_name,
+                        rotate,
+                    )
+                except TrimError as e:
+                    return e.image
+
+                # Store Crop area of the original image_array we will use from the next frame.
+                self.auto_crop = TrimCropData(
+                    self.trim_left, self.trim_up, self.trim_right, self.trim_down
+                ).to_list()
+                await self._async_save_auto_crop_data()  # Save the crop data to the disk
+                self.auto_crop_offset()
+            # If it is needed to zoom the image.
+            trimmed = await self.imu.async_check_if_zoom_is_on(
+                image_array, margin_size, zoom
             )
-            # Calculate the dimensions after trimming using min/max values
-            trimmed_width = max(0, self.trim_right - self.trim_left)
-            trimmed_height = max(0, self.trim_down - self.trim_up)
-            trim_r = image_array.shape[1] - self.trim_right
-            trim_d = image_array.shape[0] - self.trim_down
-            trim_l = image_array.shape[1] - self.trim_left
-            trim_u = image_array.shape[0] - self.trim_up
+            del image_array  # Free memory.
+            # Rotate the cropped image based on the given angle
+            rotated = await self.imu.async_rotate_the_image(trimmed, rotate)
+            del trimmed  # Free memory.
+            _LOGGER.debug(f"{self.file_name}: Auto Trim Box data: {self.crop_area}")
+            self.crop_img_size = [rotated.shape[1], rotated.shape[0]]
             _LOGGER.debug(
-                "Calculated trims values for right {}, bottom {}, left {} and up {}.".format(
-                    trim_r, trim_d, trim_l, trim_u
-                )
+                f"{self.file_name}: Auto Trimmed image size: {self.crop_img_size}"
             )
-            _LOGGER.debug(
-                "Calculated trim width {} and trim height {}".format(
-                    trimmed_width, trimmed_height
-                )
+
+        except Exception as e:
+            _LOGGER.warning(
+                f"{self.file_name}: Error {e} during auto trim and zoom.",
+                exc_info=True,
             )
-            # Test if the trims are okay or not
-            if trimmed_height <= margin_size or trimmed_width <= margin_size:
-                _LOGGER.debug(f"Background colour not detected at rotation {rotate}.")
-                pos_0 = 0
-                self.crop_area = (
-                    pos_0,
-                    pos_0,
-                    image_array.shape[1],
-                    image_array.shape[0],
-                )
-                self.img_size = (image_array.shape[1], image_array.shape[0])
-                del trimmed_width, trimmed_height
-                return image_array
-            # Store Crop area of the original image_array we will use from the next frame.
-            self.auto_crop = [
-                self.trim_left,
-                self.trim_up,
-                self.trim_right,
-                self.trim_down,
-            ]
-        # Apply the auto-calculated trims to the rotated image
-        trimmed = image_array[
-            self.auto_crop[1] : self.auto_crop[3],
-            self.auto_crop[0] : self.auto_crop[2],
-        ]
-        del image_array
-        # Rotate the cropped image based on the given angle
-        if rotate == 90:
-            rotated = np.rot90(trimmed)
-            self.crop_area = [
-                self.trim_left,
-                self.trim_up,
-                self.trim_right,
-                self.trim_down,
-            ]
-        elif rotate == 180:
-            rotated = np.rot90(trimmed, 2)
-            self.crop_area = self.auto_crop
-        elif rotate == 270:
-            rotated = np.rot90(trimmed, 3)
-            self.crop_area = [
-                self.trim_left,
-                self.trim_up,
-                self.trim_right,
-                self.trim_down,
-            ]
-        else:
-            rotated = trimmed
-            self.crop_area = self.auto_crop
-        del trimmed
-        _LOGGER.debug("Auto Trim Box data: %s", self.crop_area)
-        self.crop_img_size = [rotated.shape[1], rotated.shape[0]]
-        _LOGGER.debug("Trimmed image size: %s", self.crop_img_size)
+            return None
         return rotated
 
     async def extract_room_properties(
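Note: the trim box above is cached in memory as a list and persisted to disk as a dict, so `TrimCropData` needs `to_list()`, `to_dict()` and `from_dict()`. A hypothetical stand-in to illustrate that round trip (the real class lives in `types.py`; the field and key names here are assumptions):

```python
from dataclasses import dataclass


@dataclass
class TrimBox:
    """Illustrative stand-in for TrimCropData: one trim value per image edge."""

    trim_left: int
    trim_up: int
    trim_right: int
    trim_down: int

    def to_dict(self) -> dict[str, int]:
        # Keys are assumed; only the round-trip behaviour matters here.
        return {
            "trim_left": self.trim_left,
            "trim_up": self.trim_up,
            "trim_right": self.trim_right,
            "trim_down": self.trim_down,
        }

    @classmethod
    def from_dict(cls, data: dict[str, int]) -> "TrimBox":
        return cls(
            data["trim_left"], data["trim_up"], data["trim_right"], data["trim_down"]
        )

    def to_list(self) -> list[int]:
        return [self.trim_left, self.trim_up, self.trim_right, self.trim_down]


# Round-trip example: what gets cached in memory vs. written to disk.
box = TrimBox(10, 20, 310, 220)
assert TrimBox.from_dict(box.to_dict()).to_list() == [10, 20, 310, 220]
```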
@@ -377,10 +448,12 @@ async def get_image_from_rrm(
                 )
                 # checking if there are segments too (sorted pixels in the raw data).
                 if not self.segment_data:
-                    self.segment_data, self.outlines = await self.data.async_get_rrm_segments(
-                        m_json, size_x, size_y, pos_top, pos_left, True
+                    self.segment_data, self.outlines = (
+                        await self.data.async_get_rrm_segments(
+                            m_json, size_x, size_y, pos_top, pos_left, True
+                        )
                     )
-
+
                 if (self.segment_data and pixels) or pixels:
                     room_color = self.shared.rooms_colors[room_id]
                     # drawing floor
@@ -392,10 +465,16 @@ async def get_image_from_rrm(
                 room_id = 0
                 rooms_list = [color_wall]
                 if self.segment_data:
-                    _LOGGER.info(self.file_name + ": Drawing segments ")
+                    _LOGGER.info(
+                        f"{self.file_name}: Drawing segments."
+                    )
                     for pixels in self.segment_data:
                         room_color = self.shared.rooms_colors[room_id]
                         rooms_list.append(room_color)
+                        _LOGGER.debug(
+                            f"Room {room_id} color: {room_color}, "
+                            f"active zones: {self.active_zones}"
+                        )
                         if (
                             self.active_zones
                             and len(self.active_zones) > room_id
@@ -503,11 +582,12 @@ async def get_image_from_rrm(
                 f"{self.file_name}:"
                 f" Auto cropping the image with rotation {int(self.shared.image_rotate)}"
             )
-            img_np_array = await self.auto_crop_and_trim_array(
+            img_np_array = await self.async_auto_trim_and_zoom_image(
                 img_np_array,
                 color_background,
                 int(self.shared.margins),
                 int(self.shared.image_rotate),
+                self.zooming,
            )
             pil_img = Image.fromarray(img_np_array, mode="RGBA")
             del img_np_array  # unload memory
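Note: the background-margin detection that the handler now delegates to `ImageUtils.async_image_margins` was previously inlined in `auto_crop_and_trim_array` (see the removed `np.column_stack`/`np.where` lines in the large hunk above). A minimal synchronous sketch of the same detection, assuming an RGBA numpy array, a 4-tuple background colour, and at least one non-background pixel; the function name is illustrative, not the integration's API:

```python
import numpy as np


def image_margins(image: np.ndarray, background: tuple[int, int, int, int]):
    """Return (min_y, min_x, max_x, max_y) of pixels that differ from the background."""
    # Stack (y, x, channel) indices of every value that differs from the background.
    nonzero = np.column_stack(np.where(image != list(background)))
    min_y, min_x, _ = np.min(nonzero, axis=0)
    max_y, max_x, _ = np.max(nonzero, axis=0)
    return int(min_y), int(min_x), int(max_x), int(max_y)


# Example: a 100x100 transparent canvas with one opaque red block.
canvas = np.zeros((100, 100, 4), dtype=np.uint8)
canvas[40:60, 30:70] = (255, 0, 0, 255)
print(image_margins(canvas, (0, 0, 0, 0)))  # -> (40, 30, 69, 59)
```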