diff --git a/backend/app/database/images.py b/backend/app/database/images.py
index 361818947..95e22477c 100644
--- a/backend/app/database/images.py
+++ b/backend/app/database/images.py
@@ -6,6 +6,10 @@ from app.config.settings import (
     DATABASE_PATH,
 )
 
+from app.logging.setup_logging import get_logger
+
+# Initialize logger
+logger = get_logger(__name__)
 
 # Type definitions
 ImageId = str
@@ -108,7 +112,7 @@ def db_bulk_insert_images(image_records: List[ImageRecord]) -> bool:
         conn.commit()
         return True
     except Exception as e:
-        print(f"Error inserting image records: {e}")
+        logger.error(f"Error inserting image records: {e}")
         conn.rollback()
         return False
     finally:
@@ -189,7 +193,7 @@ def db_get_all_images() -> List[dict]:
         return images
 
     except Exception as e:
-        print(f"Error getting all images: {e}")
+        logger.error(f"Error getting all images: {e}")
         return []
     finally:
         conn.close()
@@ -264,7 +268,7 @@ def db_update_image_tagged_status(image_id: ImageId, is_tagged: bool = True) ->
         conn.commit()
         return cursor.rowcount > 0
     except Exception as e:
-        print(f"Error updating image tagged status: {e}")
+        logger.error(f"Error updating image tagged status: {e}")
         conn.rollback()
         return False
     finally:
@@ -298,7 +302,7 @@ def db_insert_image_classes_batch(image_class_pairs: List[ImageClassPair]) -> bo
         conn.commit()
         return True
     except Exception as e:
-        print(f"Error inserting image classes: {e}")
+        logger.error(f"Error inserting image classes: {e}")
         conn.rollback()
         return False
     finally:
@@ -336,7 +340,7 @@ def db_get_images_by_folder_ids(
         )
         return cursor.fetchall()
     except Exception as e:
-        print(f"Error getting images by folder IDs: {e}")
+        logger.error(f"Error getting images by folder IDs: {e}")
         return []
     finally:
         conn.close()
@@ -367,10 +371,10 @@ def db_delete_images_by_ids(image_ids: List[ImageId]) -> bool:
             image_ids,
         )
         conn.commit()
-        print(f"Deleted {cursor.rowcount} obsolete image(s) from database")
+        logger.info(f"Deleted {cursor.rowcount} obsolete image(s) from database")
         return True
     except Exception as e:
-        print(f"Error deleting images: {e}")
+        logger.error(f"Error deleting images: {e}")
         conn.rollback()
         return False
     finally:
diff --git a/backend/app/logging/__init__.py b/backend/app/logging/__init__.py
new file mode 100644
index 000000000..7f1ca9234
--- /dev/null
+++ b/backend/app/logging/__init__.py
@@ -0,0 +1,9 @@
+"""
+__init__.py for the backend.app.logging package.
+
+This file allows the package to be imported and re-exports the shared logging helpers.
+"""
+
+from .setup_logging import get_logger, configure_uvicorn_logging, setup_logging
+
+__all__ = ["get_logger", "configure_uvicorn_logging", "setup_logging"]
diff --git a/backend/app/logging/setup_logging.py b/backend/app/logging/setup_logging.py
new file mode 100644
index 000000000..e64424654
--- /dev/null
+++ b/backend/app/logging/setup_logging.py
@@ -0,0 +1,292 @@
+"""
+Core logging module for the PictoPy project.
+
+This module provides centralized logging functionality for all components
+of the PictoPy project, including color coding and consistent formatting.
+"""
+
+import os
+import json
+import logging
+import sys
+from pathlib import Path
+from typing import Optional, Dict, Any
+
+
+class ColorFormatter(logging.Formatter):
+    """
+    Custom formatter that adds color to log messages based on their level.
+    """
+
+    # ANSI color codes
+    COLORS = {
+        "black": "\033[30m",
+        "red": "\033[31m",
+        "green": "\033[32m",
+        "yellow": "\033[33m",
+        "blue": "\033[34m",
+        "magenta": "\033[35m",
+        "cyan": "\033[36m",
+        "white": "\033[37m",
+        "bg_black": "\033[40m",
+        "bg_red": "\033[41m",
+        "bg_green": "\033[42m",
+        "bg_yellow": "\033[43m",
+        "bg_blue": "\033[44m",
+        "bg_magenta": "\033[45m",
+        "bg_cyan": "\033[46m",
+        "bg_white": "\033[47m",
+        "reset": "\033[0m",
+    }
+
+    def __init__(
+        self,
+        fmt: str,
+        component_config: Dict[str, Any],
+        level_colors: Dict[str, str],
+        use_colors: bool = True,
+    ):
+        """
+        Initialize the formatter with the given format string and color settings.
+
+        Args:
+            fmt: The format string to use
+            component_config: Configuration for the component (prefix and color)
+            level_colors: Dictionary mapping log levels to colors
+            use_colors: Whether to use colors in log output
+        """
+        super().__init__(fmt)
+        self.component_config = component_config
+        self.level_colors = level_colors
+        self.use_colors = use_colors
+
+    def format(self, record: logging.LogRecord) -> str:
+        """Format the log record with colors and component prefix."""
+        # Add component information to the record
+        component_prefix = self.component_config.get("prefix", "")
+        record.component = component_prefix
+
+        # Format the message
+        formatted_message = super().format(record)
+
+        if not self.use_colors:
+            return formatted_message
+
+        # Add color to the component prefix
+        component_color = self.component_config.get("color", "")
+        if component_color and component_color in self.COLORS:
+            component_start = formatted_message.find(f"[{component_prefix}]")
+            if component_start >= 0:
+                component_end = component_start + len(f"[{component_prefix}]")
+                formatted_message = (
+                    formatted_message[:component_start]
+                    + self.COLORS[component_color]
+                    + formatted_message[component_start:component_end]
+                    + self.COLORS["reset"]
+                    + formatted_message[component_end:]
+                )
+
+        # Add color to the log level
+        level_color = self.level_colors.get(record.levelname, "")
+        if level_color:
+            # Handle comma-separated color specs like "red,bg_white"
+            color_codes = ""
+            for color in level_color.split(","):
+                if color in self.COLORS:
+                    color_codes += self.COLORS[color]
+
+            if color_codes:
+                level_start = formatted_message.find(f" {record.levelname} ")
+                if level_start >= 0:
+                    level_end = level_start + len(f" {record.levelname} ")
+                    formatted_message = (
+                        formatted_message[:level_start]
+                        + color_codes
+                        + formatted_message[level_start:level_end]
+                        + self.COLORS["reset"]
+                        + formatted_message[level_end:]
+                    )
+
+        return formatted_message
+
+
+def load_config() -> Dict[str, Any]:
+    """
+    Load the logging configuration from the JSON file.
+
+    Returns:
+        Dict containing the logging configuration
+    """
+    config_path = (
+        Path(__file__).parent.parent.parent.parent
+        / "utils"
+        / "logging"
+        / "logging_config.json"
+    )
+    try:
+        with open(config_path, "r") as f:
+            return json.load(f)
+    except (FileNotFoundError, json.JSONDecodeError) as e:
+        print(f"Error loading logging configuration: {e}", file=sys.stderr)
+        return {}
+
+
+def setup_logging(component_name: str, environment: Optional[str] = None) -> None:
+    """
+    Set up logging for the given component.
+
+    Args:
+        component_name: The name of the component (e.g., "backend", "sync-microservice")
+        environment: The environment to use (e.g., "development", "production")
+                     If None, uses the environment specified in the config or "development"
+    """
+    config = load_config()
+    if not config:
+        print(
+            "No logging configuration found. Using default settings.", file=sys.stderr
+        )
+        return
+
+    # Get environment settings
+    if not environment:
+        environment = os.environ.get(
+            "ENV", config.get("default_environment", "development")
+        )
+
+    env_settings = config.get("environments", {}).get(environment, {})
+    log_level = getattr(logging, env_settings.get("level", "INFO"), logging.INFO)
+    use_colors = env_settings.get("colored_output", True)
+    console_logging = env_settings.get("console_logging", True)
+
+    # Get component configuration
+    component_config = config.get("components", {}).get(
+        component_name, {"prefix": component_name.upper(), "color": "white"}
+    )
+
+    # Configure root logger
+    root_logger = logging.getLogger()
+    root_logger.setLevel(log_level)
+
+    # Clear existing handlers
+    for handler in root_logger.handlers[:]:
+        root_logger.removeHandler(handler)
+
+    # Configure specific loggers if defined in environment settings
+    if "loggers" in env_settings:
+        for logger_name, logger_config in env_settings["loggers"].items():
+            logger = logging.getLogger(logger_name)
+            if "level" in logger_config:
+                logger.setLevel(getattr(logging, logger_config["level"], log_level))
+
+    # Set up console handler
+    if console_logging:
+        console_handler = logging.StreamHandler(sys.stdout)
+        console_handler.setLevel(log_level)
+
+        # Create formatter with component and color information
+        fmt = (
+            config.get("formatters", {})
+            .get("default", {})
+            .get("format", "[%(component)s] | %(levelname)s | %(message)s")
+        )
+        formatter = ColorFormatter(
+            fmt, component_config, config.get("colors", {}), use_colors
+        )
+        console_handler.setFormatter(formatter)
+        root_logger.addHandler(console_handler)
+
+
+def get_logger(name: str) -> logging.Logger:
+    """
+    Get a logger with the given name.
+
+    Args:
+        name: Name of the logger, typically the module name
+
+    Returns:
+        Logger instance
+    """
+    return logging.getLogger(name)
+
+
+class InterceptHandler(logging.Handler):
+    """
+    Handler to intercept logs from other loggers (like Uvicorn) and redirect them
+    through our custom logger.
+
+    This implementation is based on Loguru's approach and routes logs directly to
+    the root logger.
+    """
+
+    def __init__(self, component_name: str):
+        """
+        Initialize the InterceptHandler.
+
+        Args:
+            component_name: The name of the component (e.g., "backend")
+        """
+        super().__init__()
+        self.component_name = component_name
+
+    def emit(self, record: logging.LogRecord) -> None:
+        """
+        Process a log record by forwarding it through our custom logger.
+
+        Args:
+            record: The log record to process
+        """
+        # Get the appropriate module name
+        module_name = record.name
+        if "." in module_name:
+            module_name = module_name.split(".")[-1]
+
+        # Create a message that includes the original module in the format
+        msg = record.getMessage()
+
+        # Find the appropriate logger
+        logger = get_logger(module_name)
+
+        # Log the message with our custom formatting
+        logger.log(record.levelno, f"[uvicorn] {msg}")
+
+
+def configure_uvicorn_logging(component_name: str) -> None:
+    """
+    Configure Uvicorn logging to match our format.
+
+    Args:
+        component_name: The name of the component (e.g., "backend")
+    """
+    import logging.config
+
+    # Create an intercept handler with our component name
+    intercept_handler = InterceptHandler(component_name)
+
+    # Make sure the handler uses our ColorFormatter
+    config = load_config()
+    component_config = config.get("components", {}).get(
+        component_name, {"prefix": component_name.upper(), "color": "white"}
+    )
+    level_colors = config.get("colors", {})
+    env_settings = config.get("environments", {}).get(
+        os.environ.get("ENV", config.get("default_environment", "development")), {}
+    )
+    use_colors = env_settings.get("colored_output", True)
+
+    fmt = "[%(component)s] | %(module)s | %(levelname)s | %(message)s"
+    formatter = ColorFormatter(fmt, component_config, level_colors, use_colors)
+    intercept_handler.setFormatter(formatter)
+
+    # Configure Uvicorn loggers to use our handler
+    for logger_name in ["uvicorn", "uvicorn.error", "uvicorn.access"]:
+        uvicorn_logger = logging.getLogger(logger_name)
+        uvicorn_logger.handlers = []  # Clear existing handlers
+        uvicorn_logger.propagate = False  # Don't propagate to root
+        uvicorn_logger.setLevel(logging.INFO)  # Ensure log level is at least INFO
+        uvicorn_logger.addHandler(intercept_handler)
+
+    # Also configure asyncio logger similarly
+    asyncio_logger = logging.getLogger("asyncio")
+    asyncio_logger.handlers = []
+    asyncio_logger.propagate = False
+    asyncio_logger.addHandler(intercept_handler)
diff --git a/backend/app/models/FaceDetector.py b/backend/app/models/FaceDetector.py
index 4023447a2..5129bf719 100644
--- a/backend/app/models/FaceDetector.py
+++ b/backend/app/models/FaceDetector.py
@@ -6,6 +6,10 @@ from app.utils.YOLO import YOLO_util_get_model_path
 from app.models.YOLO import YOLO
 from app.database.faces import db_insert_face_embeddings_by_image_id
 
+from app.logging.setup_logging import get_logger
+
+# Initialize logger
+logger = get_logger(__name__)
 
 
 class FaceDetector:
@@ -17,17 +21,17 @@ def __init__(self):
         )
         self.facenet = FaceNet(FaceNet_util_get_model_path())
         self._initialized = True
-        print("FaceDetector initialized with YOLO and FaceNet models.")
+        logger.info("FaceDetector initialized with YOLO and FaceNet models.")
 
     def detect_faces(self, image_id: str, image_path: str, forSearch: bool = False):
         img = cv2.imread(image_path)
         if img is None:
-            print(f"Failed to load image: {image_path}")
+            logger.error(f"Failed to load image: {image_path}")
             return None
 
         boxes, scores, class_ids = self.yolo_detector(img)
-        print(boxes)
-        print(f"Detected {len(boxes)} faces in image {image_id}.")
+        logger.debug(f"Face detection boxes: {boxes}")
+        logger.info(f"Detected {len(boxes)} faces in image {image_id}.")
 
         processed_faces, embeddings, bboxes, confidences = [], [], [], []
diff --git a/backend/app/models/FaceNet.py b/backend/app/models/FaceNet.py
index 8492e6cf7..df17d3a77 100644
--- a/backend/app/models/FaceNet.py
+++ b/backend/app/models/FaceNet.py
@@ -3,6 +3,10 @@ import onnxruntime
 from app.utils.FaceNet import FaceNet_util_normalize_embedding
 from app.utils.ONNX import ONNX_util_get_execution_providers
 
+from app.logging.setup_logging import get_logger
+
+# Initialize logger
+logger = get_logger(__name__)
 
 
 class FaceNet:
@@ -22,4 +26,4 @@ def get_embedding(self, preprocessed_image):
 
     def close(self):
         del self.session
-        print("FaceNet model session closed.")
+        logger.info("FaceNet model session closed.")
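For reference, a minimal, self-contained sketch of the formatter above in action. The component and color dicts are hand-built placeholders shaped like the entries in utils/logging/logging_config.json, not the real config:

```python
import logging
from app.logging.setup_logging import ColorFormatter

# Placeholder config mirroring the shapes ColorFormatter expects.
component = {"prefix": "BACKEND", "color": "blue"}
level_colors = {"INFO": "green", "CRITICAL": "red,bg_white"}

formatter = ColorFormatter(
    "[%(component)s] | %(levelname)s | %(message)s", component, level_colors
)
handler = logging.StreamHandler()
handler.setFormatter(formatter)

logger = logging.getLogger("demo")
logger.addHandler(handler)
logger.setLevel(logging.INFO)
logger.info("hello")  # -> "[BACKEND] | INFO | hello", with ANSI colors applied
```

The formatter colors by substring search: it looks for "[PREFIX]" and " LEVELNAME " in the already-formatted message, so the format string must actually contain %(component)s and a space-delimited %(levelname)s for the coloring to apply.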
diff --git a/backend/app/models/ObjectClassifier.py b/backend/app/models/ObjectClassifier.py
index 8be21c768..1371705b1 100644
--- a/backend/app/models/ObjectClassifier.py
+++ b/backend/app/models/ObjectClassifier.py
@@ -1,6 +1,9 @@
 import cv2
 from app.models.YOLO import YOLO
 from app.utils.YOLO import YOLO_util_get_model_path
+from app.logging.setup_logging import get_logger
+
+logger = get_logger(__name__)
 
 
 class ObjectClassifier:
@@ -12,11 +15,11 @@ def __init__(self):
     def get_classes(self, img_path) -> list[int] | None:
         img = cv2.imread(img_path)
         if img is None:
-            print(f"Failed to load image: {img_path}")
+            logger.error(f"Failed to load image: {img_path}")
             return None
 
         _, _, class_ids = self.yolo_classifier(img)
-        print(class_ids, flush=True)
+        logger.debug(f"Class IDs detected: {class_ids}")
         # convert class_ids to a list of integers from numpy array
         class_ids = [int(class_id) for class_id in class_ids]
         return class_ids
diff --git a/backend/app/models/YOLO.py b/backend/app/models/YOLO.py
index 0a1bb9c2a..66e55d377 100644
--- a/backend/app/models/YOLO.py
+++ b/backend/app/models/YOLO.py
@@ -9,6 +9,9 @@
 )
 from app.utils.memory_monitor import log_memory_usage
 from app.utils.ONNX import ONNX_util_get_execution_providers
+from app.logging.setup_logging import get_logger
+
+logger = get_logger(__name__)
 
 
 class YOLO:
@@ -30,7 +33,7 @@ def __call__(self, image):
 
     def close(self):
         del self.session  # Clean up the ONNX session
-        print("YOLO model session closed.")
+        logger.info("YOLO model session closed.")
 
     @log_memory_usage
     def detect_objects(self, image):
diff --git a/backend/app/routes/folders.py b/backend/app/routes/folders.py
index 8bc995760..a66cca27c 100644
--- a/backend/app/routes/folders.py
+++ b/backend/app/routes/folders.py
@@ -11,6 +11,7 @@
     db_get_folder_ids_by_path_prefix,
     db_get_all_folder_details,
 )
+from app.logging.setup_logging import get_logger
 from app.schemas.folders import (
     AddFolderRequest,
     AddFolderResponse,
@@ -44,6 +45,8 @@ from app.utils.face_clusters import cluster_util_face_clusters_sync
 from app.utils.API import API_util_restart_sync_microservice_watcher
 
+# Initialize logger
+logger = get_logger(__name__)
 
 router = APIRouter()
 
@@ -63,7 +66,7 @@ def post_folder_add_sequence(folder_path: str, folder_id: int):
         for folder_id_from_db, folder_path_from_db in folder_ids_and_paths:
             folder_data.append((folder_path_from_db, folder_id_from_db, False))
 
-        print("Add folder: ", folder_data)
+        logger.info(f"Add folder: {folder_data}")
 
         # Process images in all folders
         image_util_process_folder_images(folder_data)
@@ -71,7 +74,9 @@ def post_folder_add_sequence(folder_path: str, folder_id: int):
         API_util_restart_sync_microservice_watcher()
 
     except Exception as e:
-        print(f"Error in post processing after folder {folder_path} was added: {e}")
+        logger.error(
+            f"Error in post processing after folder {folder_path} was added: {e}"
+        )
         return False
 
     return True
@@ -86,7 +91,7 @@ def post_AI_tagging_enabled_sequence():
         image_util_process_untagged_images()
         cluster_util_face_clusters_sync()
     except Exception as e:
-        print(f"Error in post processing after AI tagging was enabled: {e}")
+        logger.error(f"Error in post processing after AI tagging was enabled: {e}")
         return False
 
     return True
@@ -108,7 +113,7 @@ def post_sync_folder_sequence(
         for added_folder_id, added_folder_path in added_folders:
             folder_data.append((added_folder_path, added_folder_id, False))
 
-        print("Sync folder: ", folder_data)
+        logger.info(f"Sync folder: {folder_data}")
         # Process images in all folders
         image_util_process_folder_images(folder_data)
         image_util_process_untagged_images()
@@ -117,7 +122,9 @@ def post_sync_folder_sequence(
         # Restart sync microservice watcher after processing images
         API_util_restart_sync_microservice_watcher()
     except Exception as e:
-        print(f"Error in post processing after folder {folder_path} was synced: {e}")
+        logger.error(
+            f"Error in post processing after folder {folder_path} was synced: {e}"
+        )
         return False
 
     return True
diff --git a/backend/app/utils/face_clusters.py b/backend/app/utils/face_clusters.py
index 98797ba5c..32d922d71 100644
--- a/backend/app/utils/face_clusters.py
+++ b/backend/app/utils/face_clusters.py
@@ -21,6 +21,10 @@
     db_get_metadata,
     db_update_metadata,
 )
+from app.logging.setup_logging import get_logger
+
+# Initialize logger
+logger = get_logger(__name__)
 
 
 class ClusterResult:
@@ -160,7 +164,7 @@ def cluster_util_cluster_all_face_embeddings(
         embeddings.append(face["embeddings"])
         existing_cluster_names.append(face["cluster_name"])
 
-    print(f"Total faces to cluster: {len(face_ids)}")
+    logger.info(f"Total faces to cluster: {len(face_ids)}")
 
     # Convert to numpy array for DBSCAN
    embeddings_array = np.array(embeddings)
@@ -174,7 +178,7 @@ def cluster_util_cluster_all_face_embeddings(
     )
     cluster_labels = dbscan.fit_predict(embeddings_array)
 
-    print(f"DBSCAN found {len(set(cluster_labels)) - 1} clusters")
+    logger.info(f"DBSCAN found {len(set(cluster_labels)) - (1 if -1 in cluster_labels else 0)} clusters")
 
     # Group faces by cluster labels
     clusters = defaultdict(list)
@@ -336,7 +340,7 @@ def _update_cluster_face_image(cluster_id: str, face_image_base64: str) -> bool:
         return updated
 
     except Exception as e:
-        print(f"Error updating face image for cluster {cluster_id}: {e}")
+        logger.error(f"Error updating face image for cluster {cluster_id}: {e}")
         conn.rollback()
         return False
     finally:
@@ -387,7 +391,7 @@ def _get_cluster_face_data(cluster_uuid: str) -> Optional[tuple]:
         return None
 
     except Exception as e:
-        print(f"Error getting face data for cluster {cluster_uuid}: {e}")
+        logger.error(f"Error getting face data for cluster {cluster_uuid}: {e}")
         return None
     finally:
         conn.close()
@@ -487,7 +491,7 @@ def _crop_and_resize_face(
         return face_crop
 
     except Exception as e:
-        print(f"Error cropping and resizing face: {e}")
+        logger.error(f"Error cropping and resizing face: {e}")
         return None
@@ -506,7 +510,7 @@ def _encode_image_to_base64(img: np.ndarray, format: str = ".jpg") -> Optional[s
         _, buffer = cv2.imencode(format, img)
         return base64.b64encode(buffer).decode("utf-8")
     except Exception as e:
-        print(f"Error encoding image to base64: {e}")
+        logger.error(f"Error encoding image to base64: {e}")
         return None
@@ -545,7 +549,7 @@ def _generate_cluster_face_image(cluster_uuid: str) -> Optional[str]:
         return _encode_image_to_base64(face_crop)
 
     except Exception as e:
-        print(f"Error generating face image for cluster {cluster_uuid}: {e}")
+        logger.error(f"Error generating face image for cluster {cluster_uuid}: {e}")
         return None
diff --git a/backend/app/utils/folders.py b/backend/app/utils/folders.py
index d440abaaa..ec014f479 100644
--- a/backend/app/utils/folders.py
+++ b/backend/app/utils/folders.py
@@ -1,13 +1,17 @@
 import uuid
 import os
-from app.database.folders import db_insert_folders_batch
 from typing import List, Tuple
+
 from fastapi import HTTPException, status
-from app.schemas.folders import ErrorResponse
 from app.database.folders import (
+    db_insert_folders_batch,
     db_update_parent_ids_for_subtree,
     db_delete_folders_batch,
 )
+from app.schemas.folders import ErrorResponse
+from app.logging.setup_logging import get_logger
+
+logger = get_logger(__name__)
 
 
 def folder_util_add_folder_tree(
@@ -169,6 +173,6 @@ def folder_util_add_multiple_folder_trees(
         except Exception as e:
             # Log the error but continue with other folders
-            print(f"Error adding folder {folder_path}: {e}")
+            logger.error(f"Error adding folder {folder_path}: {e}")
 
     return added_count, added_folders
diff --git a/backend/app/utils/image_metadata.py b/backend/app/utils/image_metadata.py
index 56c4b40ed..c5a91d3e6 100644
--- a/backend/app/utils/image_metadata.py
+++ b/backend/app/utils/image_metadata.py
@@ -3,6 +3,9 @@
 from PIL.ExifTags import TAGS
 from datetime import datetime
 from PIL.TiffImagePlugin import IFDRational
+from app.logging.setup_logging import get_logger
+
+logger = get_logger(__name__)
 
 
 def extract_metadata(image_path):
@@ -43,8 +46,8 @@ def extract_metadata(image_path):
                 metadata[str(tag).lower().replace(" ", "_")] = data
 
     except Exception as exif_error:
-        print(
-            f"Warning: Failed to extract EXIF data from {image_path}. Error: {exif_error}"
+        logger.warning(
+            f"Failed to extract EXIF data from {image_path}. Error: {exif_error}"
         )
 
     except FileNotFoundError:
@@ -60,8 +63,8 @@ def extract_metadata(image_path):
     try:
         metadata["file_size"] = os.path.getsize(image_path)
     except OSError as file_error:
-        print(
-            f"Warning: Could not retrieve file size for {image_path}. Error: {file_error}"
+        logger.warning(
+            f"Could not retrieve file size for {image_path}. Error: {file_error}"
         )
 
     # Image creation date
@@ -71,7 +74,7 @@ def extract_metadata(image_path):
             "%Y-%m-%d %H:%M:%S"
         )
     except OSError as time_error:
-        print(
-            f"Warning: Could not retrieve creation date for {image_path}. Error: {time_error}"
+        logger.warning(
+            f"Could not retrieve creation date for {image_path}. Error: {time_error}"
         )
 
     return metadata
diff --git a/backend/app/utils/images.py b/backend/app/utils/images.py
index d31b38184..c3b202205 100644
--- a/backend/app/utils/images.py
+++ b/backend/app/utils/images.py
@@ -18,6 +18,9 @@
 )
 from app.models.FaceDetector import FaceDetector
 from app.models.ObjectClassifier import ObjectClassifier
+from app.logging.setup_logging import get_logger
+
+logger = get_logger(__name__)
 
 
 # GPS EXIF tag constant
@@ -116,7 +119,7 @@ def image_util_classify_and_face_detect_images(
         if len(classes) > 0:
             # Create image-class pairs
             image_class_pairs = [(image_id, class_id) for class_id in classes]
-            logger.debug(f"Image class pairs: {image_class_pairs}")
+            logger.debug(f"Image-class pairs: {image_class_pairs}")
 
             # Insert the pairs into the database
             db_insert_image_classes_batch(image_class_pairs)
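The microservice changes below replace silent pipe inheritance with reader threads that drain the child process's stdout/stderr. A standalone sketch of that pattern follows; the `python -c` child here is a stand-in for the real sync executable:

```python
import subprocess
import threading

def forward(pipe):
    # Mirrors stream_logs: read until EOF, drop blank lines, then close.
    for line in iter(pipe.readline, ""):
        line = line.strip()
        if line:
            print(line)
    pipe.close()

proc = subprocess.Popen(
    ["python", "-c", "print('hello from child')"],
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
    text=True,
    bufsize=1,  # line-buffered so output arrives promptly
)
threads = [
    threading.Thread(target=forward, args=(proc.stdout,)),
    threading.Thread(target=forward, args=(proc.stderr,)),
]
for t in threads:
    t.start()
proc.wait()
for t in threads:
    t.join()
```

Draining both pipes matters: a child that fills an unread stderr pipe will eventually block on write, which is why one thread per pipe is used rather than reading them sequentially.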
diff --git a/backend/app/utils/microservice.py b/backend/app/utils/microservice.py
index 48d61d4be..563c00841 100644
--- a/backend/app/utils/microservice.py
+++ b/backend/app/utils/microservice.py
@@ -5,9 +5,37 @@
 from pathlib import Path
 from typing import Optional
 
-import logging
+import threading
+import atexit
+from app.logging.setup_logging import get_logger
 
-logger = logging.getLogger(__name__)
+logger = get_logger(__name__)
+
+# Global tracking for subprocess log threads
+_log_threads = []
+
+
+def cleanup_log_threads():
+    """Clean up log threads during shutdown to ensure all buffered logs are processed."""
+    if _log_threads:
+        logger.info("Cleaning up log threads...")
+        for thread in _log_threads:
+            if thread.is_alive():
+                thread.join(timeout=2.0)  # Wait up to 2 seconds for each thread
+        _log_threads.clear()
+        logger.info("Log threads cleanup completed")
+
+
+# Register cleanup function to run at exit
+atexit.register(cleanup_log_threads)
+
+
+def microservice_util_stop_sync_service():
+    """
+    Stop the sync microservice and clean up log threads.
+    This function should be called during application shutdown.
+    """
+    cleanup_log_threads()
 
 
 def microservice_util_start_sync_service(
@@ -43,6 +71,24 @@
     return False
 
 
+CYAN = "\033[96m"
+RED = "\033[91m"
+MAGENTA = "\033[95m"
+RESET = "\033[0m"
+
+
+def stream_logs(pipe, prefix, color):
+    """Read a process pipe and print pre-formatted logs from sync-microservice; prefix and color are currently unused."""
+    for line in iter(pipe.readline, ""):
+        if line:
+            # Trim any trailing newlines
+            line = line.strip()
+            if line:
+                # All output from sync-microservice is now properly formatted by its logger
+                print(line)
+    pipe.close()
+
+
 def _start_frozen_sync_service() -> bool:
     """
     Start the sync microservice when running as a frozen executable.
@@ -76,14 +122,36 @@
     logger.info(f"Starting sync microservice from: {sync_executable}")
 
     # Start the sync microservice executable
+    cmd = str(sync_executable)  # Correct the command to use the actual path
+    logger.info(f"Starting sync microservice with command: {cmd}")
+
     process = subprocess.Popen(
-        [str(sync_executable)],
+        cmd,
         stdout=subprocess.PIPE,
         stderr=subprocess.PIPE,
         text=True,
-        cwd=str(sync_dir),  # Set working directory to sync service directory
+        bufsize=1,  # Line buffered output
     )
+
+    # Start background threads to forward output to logger
+    # Stream stdout with consistent SYNC-MICROSERVICE prefix
+    t1 = threading.Thread(
+        target=stream_logs,
+        args=(process.stdout, "SYNC-MICROSERVICE", CYAN),
+        daemon=False,
+    )
+
+    # Stream stderr with consistent SYNC-MICROSERVICE-ERR prefix
+    t2 = threading.Thread(
+        target=stream_logs,
+        args=(process.stderr, "SYNC-MICROSERVICE-ERR", RED),
+        daemon=False,
+    )
+
+    t1.start()
+    t2.start()
+    _log_threads.extend([t1, t2])
+
     logger.info(f"Sync microservice started with PID: {process.pid}")
     logger.info("Service should be available at http://localhost:8001")
@@ -231,36 +299,52 @@ def _start_fastapi_service(python_executable: Path, service_path: Path) -> bool:
         original_cwd = os.getcwd()
         os.chdir(service_path)
 
-    # Command to start FastAPI dev server
-    print(python_executable)
-    host = "127.0.0.1"
+    # Command to start FastAPI server
+    logger.debug(f"Using Python executable: {python_executable}")
+    host = "127.0.0.1"  # Local connections only for security
     port = "8001"
-
-    # On Windows, use a different approach with scripts path
-
-    if platform.system().lower() == "windows":
-        # Use uvicorn directly to run the FastAPI app
-        cmd = [
-            str(python_executable),
-            "-m",
-            "uvicorn",
-            "main:app",
-            "--host",
-            host,
-            "--port",
-            port,
-            "--reload",  # Add reload flag for development convenience
-        ]
-    else:
-        # For non-Windows platforms
-        cmd = [str(python_executable), "-m", "fastapi", "dev", "--port", "8001"]
+
+    # Basic uvicorn command that works on all platforms
+    cmd = [
+        str(python_executable),
+        "-m",
+        "uvicorn",
+        "main:app",
+        "--host",
+        host,
+        "--port",
+        port,
+        "--reload",  # Enable auto-reload for development
+    ]
 
     logger.info(f"Executing command: {' '.join(cmd)}")
 
     # Start the process (non-blocking)
     process = subprocess.Popen(
-        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True
+        cmd,
+        stdout=subprocess.PIPE,
+        stderr=subprocess.PIPE,
+        text=True,
+        bufsize=1,  # Line buffered output
     )
+
+    # Start background threads to forward output to logger
+    t1 = threading.Thread(
+        target=stream_logs,
+        args=(process.stdout, "SYNC-MICROSERVICE", CYAN),
+        daemon=False,
+    )
+
+    t2 = threading.Thread(
+        target=stream_logs,
+        args=(process.stderr, "SYNC-MICROSERVICE-ERR", RED),
+        daemon=False,
+    )
+
+    t1.start()
+    t2.start()
+    _log_threads.extend([t1, t2])
+
     # Restore original working directory
     os.chdir(original_cwd)
diff --git a/backend/main.py b/backend/main.py
index 1abfd8fdc..2c1f39e44 100644
--- a/backend/main.py
+++ b/backend/main.py
@@ -27,6 +27,17 @@
 from app.routes.face_clusters import router as face_clusters_router
 from app.routes.user_preferences import router as user_preferences_router
 from fastapi.openapi.utils import get_openapi
+from app.logging.setup_logging import (
+    configure_uvicorn_logging,
+    setup_logging,
+    get_logger,
+)
+
+# Set up standard logging first
+setup_logging("backend")
+
+# Configure Uvicorn logging to use our custom formatter
+configure_uvicorn_logging("backend")
 
 
 @asynccontextmanager
@@ -66,6 +77,10 @@ async def lifespan(app: FastAPI):
 )
 
 
+# Initialize logger for this module
+logger = get_logger(__name__)
+
+
 def generate_openapi_json():
     try:
         openapi_schema = get_openapi(
@@ -87,9 +102,9 @@ def generate_openapi_json():
 
         with open(openapi_path, "w") as f:
             json.dump(openapi_schema, f, indent=2)
-        print(f"OpenAPI JSON generated at {openapi_path}")
+        logger.info(f"OpenAPI JSON generated at {openapi_path}")
     except Exception as e:
-        print(f"Failed to generate openapi.json: {e}")
+        logger.error(f"Failed to generate openapi.json: {e}")
 
 
 # Add CORS middleware
@@ -122,11 +137,16 @@ async def root():
 # Entry point for running with: python3 main.py
 if __name__ == "__main__":
     multiprocessing.freeze_support()  # Required for Windows
+    logger = get_logger(__name__)
+    logger.info("Starting PictoPy backend server...")
+
+    # Create a simple config with log_config=None to disable Uvicorn's default logging
     config = Config(
         app=app,
         host="0.0.0.0",
         port=8000,
         log_level="info",
+        log_config=None,  # This is crucial - disable Uvicorn's default logging config
     )
     server = Server(config)
     server.run()
diff --git a/backend/requirements.txt b/backend/requirements.txt
index 00bec243a..b848d7ad6 100644
--- a/backend/requirements.txt
+++ b/backend/requirements.txt
@@ -1,73 +1,73 @@
-annotated-types==0.6.0
-bcrypt==3.2.0
-certifi==2024.2.2
-click==8.1.7
-coloredlogs==15.0.1
-dnspython==2.6.1
-email_validator==2.1.1
-exceptiongroup==1.2.1
-fastapi==0.111.0
-fastapi-cli==0.0.3
-flatbuffers==24.3.25
-h11==0.14.0
-h2==4.1.0
-hpack==4.0.0
-httpcore==1.0.5
-httptools==0.6.1
-httpx==0.27.0
-humanfriendly==10.0
-Hypercorn==0.17.3
-hyperframe==6.0.1
-idna==3.7
-Jinja2==3.1.4
-joblib==1.4.2
-loguru==0.4.1
-markdown-it-py==3.0.0
-MarkupSafe==2.1.5
-mdurl==0.1.2
-mkdocs==1.6.1
-mkdocs-get-deps==0.2.0
-mkdocs-material==9.6.16
-mkdocs-material-extensions==1.3.1
-mkdocs-swagger-ui-tag==0.7.1
-mpmath==1.3.0
-numpy==1.26.4
-onnxruntime==1.17.1
-opencv-python==4.9.0.80
-orjson==3.10.3
-packaging==24.0
-pillow==10.3.0
-priority==2.0.0
-protobuf==5.26.1
-pydantic==2.7.1
-pydantic_core==2.18.2
-Pygments==2.18.0
-python-dotenv==1.0.1
-python-multipart==0.0.9
-PyYAML==6.0.1
-rich==13.7.1
-scikit-learn==1.5.1
-scipy==1.14.0
-shellingham==1.5.4
-sniffio==1.3.1
-starlette==0.37.2
-sympy==1.12
-taskgroup==0.0.0a4
-threadpoolctl==3.5.0
-tomli==2.0.1
-typer==0.12.3
-typing_extensions==4.11.0
-ujson==5.10.0
-uvicorn==0.30.1
-watchfiles==0.21.0
-websockets==12.0
-wsproto==1.2.0
-pyinstaller==6.11
-pytest>=8.2
-APScheduler==3.11.0
-pre-commit>=2.21.0
-black>=23.3.0
-ruff>=0.0.241
-psutil>=5.9.5
-pytest-asyncio>=1.0.0
-setuptools==66.1.1
\ No newline at end of file
+annotated-types==0.6.0
+bcrypt==3.2.0
+certifi==2024.2.2
+click==8.1.7
+coloredlogs==15.0.1
+dnspython==2.6.1
+email_validator==2.1.1
+exceptiongroup==1.2.1
+fastapi==0.111.0
+fastapi-cli==0.0.3
+flatbuffers==24.3.25
+h11==0.14.0
+h2==4.1.0
+hpack==4.0.0
+httpcore==1.0.5
+httptools==0.6.1
+httpx==0.27.0
+humanfriendly==10.0
+Hypercorn==0.17.3
+hyperframe==6.0.1
+idna==3.7
+Jinja2==3.1.4
+joblib==1.4.2
+loguru==0.4.1
+markdown-it-py==3.0.0
+MarkupSafe==2.1.5
+mdurl==0.1.2
+mkdocs==1.6.1
+mkdocs-get-deps==0.2.0
+mkdocs-material==9.6.16
+mkdocs-material-extensions==1.3.1
+mkdocs-swagger-ui-tag==0.7.1
+mpmath==1.3.0
+numpy==1.26.4
+onnxruntime==1.17.1
+opencv-python==4.9.0.80
+orjson==3.10.3
+packaging==24.0
+pillow==10.3.0
+priority==2.0.0
+protobuf==5.26.1
+pydantic==2.7.1
+pydantic_core==2.18.2
+Pygments==2.18.0
+python-dotenv==1.0.1
+python-multipart==0.0.9
+PyYAML==6.0.1
+rich==13.7.1
+scikit-learn==1.5.1
+scipy==1.14.0
+shellingham==1.5.4
+sniffio==1.3.1
+starlette==0.37.2
+sympy==1.12
+taskgroup==0.0.0a4
+threadpoolctl==3.5.0
+tomli==2.0.1
+typer==0.12.3
+typing_extensions==4.11.0
+ujson==5.10.0
+uvicorn==0.30.1
+watchfiles==0.21.0
+websockets==12.0
+wsproto==1.2.0
+pyinstaller==6.11
+pytest>=8.2
+APScheduler==3.11.0
+pre-commit>=2.21.0
+black>=23.3.0
+ruff>=0.0.241
+psutil>=5.9.5
+pytest-asyncio>=1.0.0
+setuptools==66.1.1
diff --git a/sync-microservice/app/core/lifespan.py b/sync-microservice/app/core/lifespan.py
index e646debe8..e0e12eaa9 100644
--- a/sync-microservice/app/core/lifespan.py
+++ b/sync-microservice/app/core/lifespan.py
@@ -7,6 +7,9 @@
     watcher_util_start_folder_watcher,
     watcher_util_stop_folder_watcher,
 )
+from app.logging.setup_logging import get_sync_logger
+
+logger = get_sync_logger(__name__)
 
 # Global variable to track watcher status
 watcher_started = False
@@ -21,34 +24,34 @@ async def lifespan(app: FastAPI):
 
     try:
         # Startup
-        print("Starting PictoPy Sync Microservice...")
+        logger.info("Starting PictoPy Sync Microservice...")
 
         # Check database connection
         if not db_check_database_connection():
-            print("Failed to connect to PictoPy database")
-            print(
+            logger.error("Failed to connect to PictoPy database")
+            logger.error(
                 "Make sure the main PictoPy backend is set up and the database exists"
             )
             raise RuntimeError("Database connection failed")
 
-        print("Database connection successful")
+        logger.info("Database connection successful")
 
         watcher_started = watcher_util_start_folder_watcher()
 
-        print("Sync microservice is ready!")
+        logger.info("Sync microservice is ready!")
 
         yield
 
     except KeyboardInterrupt:
-        print("\nReceived keyboard interrupt (Ctrl+C)")
-        print("Initiating graceful shutdown...")
+        logger.info("Received keyboard interrupt (Ctrl+C)")
+        logger.info("Initiating graceful shutdown...")
     except Exception as e:
-        print(f"Unexpected error during startup: {e}")
+        logger.error(f"Unexpected error during startup: {e}")
         raise
     finally:
         # Shutdown
-        print("Shutting down sync microservice...")
+        logger.info("Shutting down sync microservice...")
 
         if watcher_started:
             watcher_util_stop_folder_watcher()
-            print("Folder watcher stopped")
-        print("Shutdown complete")
+            logger.info("Folder watcher stopped")
+        logger.info("Shutdown complete")
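A hedged, standalone sketch of the `log_config=None` pattern used in backend/main.py above ("demo:app" is a placeholder app path, not a real module): with uvicorn's default config, uvicorn installs its own handlers and formatters at startup, bypassing the root handlers installed by setup_logging().

```python
import logging
from uvicorn import Config, Server

# Stand-in for setup_logging(): one root handler with our format.
logging.basicConfig(level=logging.INFO, format="[DEMO] | %(levelname)s | %(message)s")

server = Server(
    Config(
        app="demo:app",    # placeholder module:attribute
        host="127.0.0.1",
        port=8000,
        log_level="info",
        log_config=None,   # skip uvicorn's dictConfig; keep our handlers intact
    )
)
# server.run()  # left commented: requires the placeholder app module to exist
```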
diff --git a/sync-microservice/app/database/folders.py b/sync-microservice/app/database/folders.py
index 8f413650e..0cc6b3ade 100644
--- a/sync-microservice/app/database/folders.py
+++ b/sync-microservice/app/database/folders.py
@@ -1,6 +1,9 @@
 import sqlite3
 from typing import List, Tuple, NamedTuple
 from app.config.settings import DATABASE_PATH
+from app.logging.setup_logging import get_sync_logger
+
+logger = get_sync_logger(__name__)
 
 # Type definitions
 FolderId = str
@@ -35,7 +38,7 @@ def db_get_all_folders_with_ids() -> List[FolderIdPath]:
         )
         return cursor.fetchall()
     except Exception as e:
-        print(f"Error getting folders from database: {e}")
+        logger.error(f"Error getting folders from database: {e}")
         return []
     finally:
         conn.close()
@@ -62,7 +65,7 @@ def db_check_database_connection() -> bool:
         result = cursor.fetchone()
         return result is not None
     except Exception as e:
-        print(f"Database connection error: {e}")
+        logger.error(f"Database connection error: {e}")
         return False
     finally:
         if conn is not None:
diff --git a/sync-microservice/app/logging/__init__.py b/sync-microservice/app/logging/__init__.py
new file mode 100644
index 000000000..345961476
--- /dev/null
+++ b/sync-microservice/app/logging/__init__.py
@@ -0,0 +1,9 @@
+"""
+__init__.py for the sync-microservice.app.logging package.
+
+This file allows the package to be imported and re-exports the shared logging helpers.
+"""
+
+from .setup_logging import get_sync_logger, configure_uvicorn_logging, setup_logging
+
+__all__ = ["get_sync_logger", "configure_uvicorn_logging", "setup_logging"]
diff --git a/sync-microservice/app/logging/setup_logging.py b/sync-microservice/app/logging/setup_logging.py
new file mode 100644
index 000000000..b3e91ad35
--- /dev/null
+++ b/sync-microservice/app/logging/setup_logging.py
@@ -0,0 +1,300 @@
+"""
+Core logging module for the PictoPy project.
+
+This module provides centralized logging functionality for all components
+of the PictoPy project, including color coding and consistent formatting.
+"""
+
+import os
+import json
+import logging
+import sys
+from pathlib import Path
+from typing import Optional, Dict, Any
+
+
+class ColorFormatter(logging.Formatter):
+    """
+    Custom formatter that adds color to log messages based on their level.
+    """
+
+    # ANSI color codes
+    COLORS = {
+        "black": "\033[30m",
+        "red": "\033[31m",
+        "green": "\033[32m",
+        "yellow": "\033[33m",
+        "blue": "\033[34m",
+        "magenta": "\033[35m",
+        "cyan": "\033[36m",
+        "white": "\033[37m",
+        "bg_black": "\033[40m",
+        "bg_red": "\033[41m",
+        "bg_green": "\033[42m",
+        "bg_yellow": "\033[43m",
+        "bg_blue": "\033[44m",
+        "bg_magenta": "\033[45m",
+        "bg_cyan": "\033[46m",
+        "bg_white": "\033[47m",
+        "reset": "\033[0m",
+    }
+
+    def __init__(
+        self,
+        fmt: str,
+        component_config: Dict[str, Any],
+        level_colors: Dict[str, str],
+        use_colors: bool = True,
+    ):
+        """
+        Initialize the formatter with the given format string and color settings.
+
+        Args:
+            fmt: The format string to use
+            component_config: Configuration for the component (prefix and color)
+            level_colors: Dictionary mapping log levels to colors
+            use_colors: Whether to use colors in log output
+        """
+        super().__init__(fmt)
+        self.component_config = component_config
+        self.level_colors = level_colors
+        self.use_colors = use_colors
+
+    def format(self, record: logging.LogRecord) -> str:
+        """Format the log record with colors and component prefix."""
+        # Add component information to the record
+        component_prefix = self.component_config.get("prefix", "")
+        record.component = component_prefix
+
+        # Format the message
+        formatted_message = super().format(record)
+
+        if not self.use_colors:
+            return formatted_message
+
+        # Add color to the component prefix
+        component_color = self.component_config.get("color", "")
+        if component_color and component_color in self.COLORS:
+            component_start = formatted_message.find(f"[{component_prefix}]")
+            if component_start >= 0:
+                component_end = component_start + len(f"[{component_prefix}]")
+                formatted_message = (
+                    formatted_message[:component_start]
+                    + self.COLORS[component_color]
+                    + formatted_message[component_start:component_end]
+                    + self.COLORS["reset"]
+                    + formatted_message[component_end:]
+                )
+
+        # Add color to the log level
+        level_color = self.level_colors.get(record.levelname, "")
+        if level_color:
+            # Handle comma-separated color specs like "red,bg_white"
+            color_codes = ""
+            for color in level_color.split(","):
+                if color in self.COLORS:
+                    color_codes += self.COLORS[color]
+
+            if color_codes:
+                level_start = formatted_message.find(f" {record.levelname} ")
+                if level_start >= 0:
+                    level_end = level_start + len(f" {record.levelname} ")
+                    formatted_message = (
+                        formatted_message[:level_start]
+                        + color_codes
+                        + formatted_message[level_start:level_end]
+                        + self.COLORS["reset"]
+                        + formatted_message[level_end:]
+                    )
+
+        return formatted_message
+
+
+def load_config() -> Dict[str, Any]:
+    """
+    Load the logging configuration from the JSON file.
+
+    Returns:
+        Dict containing the logging configuration
+    """
+    config_path = (
+        Path(__file__).parent.parent.parent.parent
+        / "utils"
+        / "logging"
+        / "logging_config.json"
+    )
+    try:
+        with open(config_path, "r") as f:
+            return json.load(f)
+    except (FileNotFoundError, json.JSONDecodeError) as e:
+        print(f"Error loading logging configuration: {e}", file=sys.stderr)
+        return {}
+
+
+def setup_logging(component_name: str, environment: Optional[str] = None) -> None:
+    """
+    Set up logging for the given component.
+
+    Args:
+        component_name: The name of the component (e.g., "backend", "sync-microservice")
+        environment: The environment to use (e.g., "development", "production")
+                     If None, uses the environment specified in the config or "development"
+    """
+    config = load_config()
+    if not config:
+        print(
+            "No logging configuration found. Using default settings.", file=sys.stderr
+        )
+        return
+
+    # Get environment settings
+    if not environment:
+        environment = os.environ.get(
+            "ENV", config.get("default_environment", "development")
+        )
+
+    env_settings = config.get("environments", {}).get(environment, {})
+    log_level = getattr(logging, env_settings.get("level", "INFO"), logging.INFO)
+    use_colors = env_settings.get("colored_output", True)
+    console_logging = env_settings.get("console_logging", True)
+
+    # Get component configuration
+    component_config = config.get("components", {}).get(
+        component_name, {"prefix": component_name.upper(), "color": "white"}
+    )
+
+    # Configure root logger
+    root_logger = logging.getLogger()
+    root_logger.setLevel(log_level)
+
+    # Clear existing handlers
+    for handler in root_logger.handlers[:]:
+        root_logger.removeHandler(handler)
+
+    # Set up console handler
+    if console_logging:
+        console_handler = logging.StreamHandler(sys.stdout)
+        console_handler.setLevel(log_level)
+
+        # Create formatter with component and color information
+        fmt = (
+            config.get("formatters", {})
+            .get("default", {})
+            .get("format", "[%(component)s] | %(levelname)s | %(message)s")
+        )
+        formatter = ColorFormatter(
+            fmt, component_config, config.get("colors", {}), use_colors
+        )
+        console_handler.setFormatter(formatter)
+        root_logger.addHandler(console_handler)
+
+
+def get_logger(name: str) -> logging.Logger:
+    """
+    Get a logger with the given name.
+
+    Args:
+        name: Name of the logger, typically the module name
+
+    Returns:
+        Logger instance
+    """
+    return logging.getLogger(name)
+
+
+def get_sync_logger(name: str) -> logging.Logger:
+    """
+    Get a logger with the given name specifically for the sync-microservice component.
+    The [SYNC-MICROSERVICE] prefix comes from the formatter configured in setup_logging; this simply delegates to logging.getLogger.
+
+    Args:
+        name: Name of the logger, typically the module name
+
+    Returns:
+        Logger instance
+    """
+    logger = logging.getLogger(name)
+    return logger
+
+
+class InterceptHandler(logging.Handler):
+    """
+    Handler to intercept logs from other loggers (like Uvicorn) and redirect them
+    through our custom logger.
+
+    This implementation is based on Loguru's approach and routes logs directly to
+    the root logger.
+    """
+
+    def __init__(self, component_name: str):
+        """
+        Initialize the InterceptHandler.
+
+        Args:
+            component_name: The name of the component (e.g., "backend")
+        """
+        super().__init__()
+        self.component_name = component_name
+
+    def emit(self, record: logging.LogRecord) -> None:
+        """
+        Process a log record by forwarding it through our custom logger.
+
+        Args:
+            record: The log record to process
+        """
+        # Get the appropriate module name
+        module_name = record.name
+        if "." in module_name:
+            module_name = module_name.split(".")[-1]
+
+        # Create a message that includes the original module in the format
+        msg = record.getMessage()
+
+        # Find the appropriate logger
+        logger = get_logger(module_name)
+
+        # Log the message with our custom formatting
+        logger.log(record.levelno, f"[uvicorn] {msg}")
+
+
+def configure_uvicorn_logging(component_name: str) -> None:
+    """
+    Configure Uvicorn logging to match our format.
+
+    Args:
+        component_name: The name of the component (e.g., "backend")
+    """
+    import logging.config
+
+    # Create an intercept handler with our component name
+    intercept_handler = InterceptHandler(component_name)
+
+    # Make sure the handler uses our ColorFormatter
+    config = load_config()
+    component_config = config.get("components", {}).get(
+        component_name, {"prefix": component_name.upper(), "color": "white"}
+    )
+    level_colors = config.get("colors", {})
+    env_settings = config.get("environments", {}).get(
+        os.environ.get("ENV", config.get("default_environment", "development")), {}
+    )
+    use_colors = env_settings.get("colored_output", True)
+
+    fmt = "[%(component)s] | %(module)s | %(levelname)s | %(message)s"
+    formatter = ColorFormatter(fmt, component_config, level_colors, use_colors)
+    intercept_handler.setFormatter(formatter)
+
+    # Configure Uvicorn loggers to use our handler
+    for logger_name in ["uvicorn", "uvicorn.error", "uvicorn.access"]:
+        uvicorn_logger = logging.getLogger(logger_name)
+        uvicorn_logger.handlers = []  # Clear existing handlers
+        uvicorn_logger.propagate = False  # Don't propagate to root
+        uvicorn_logger.setLevel(logging.INFO)  # Ensure log level is at least INFO
+        uvicorn_logger.addHandler(intercept_handler)
+
+    # Also configure asyncio logger similarly
+    asyncio_logger = logging.getLogger("asyncio")
+    asyncio_logger.handlers = []
+    asyncio_logger.propagate = False
+    asyncio_logger.addHandler(intercept_handler)
diff --git a/sync-microservice/app/utils/logger_writer.py b/sync-microservice/app/utils/logger_writer.py
new file mode 100644
index 000000000..c8c81f332
--- /dev/null
+++ b/sync-microservice/app/utils/logger_writer.py
@@ -0,0 +1,99 @@
+"""
+Logger Writer utility for redirecting stdout/stderr to logging system.
+
+This module provides a custom writer class that can redirect standard output
+and error streams to the logging system, ensuring all output goes through
+the centralized logging configuration.
+"""
+
+import logging
+import sys
+from contextlib import contextmanager
+from typing import Generator
+
+
+class LoggerWriter:
+    """Custom writer that redirects stdout/stderr to logger."""
+
+    def __init__(self, logger: logging.Logger, level: int):
+        """
+        Initialize the LoggerWriter.
+
+        Args:
+            logger: The logger instance to write to
+            level: The logging level to use (e.g., logging.INFO, logging.ERROR)
+        """
+        self.logger = logger
+        self.level = level
+        self.buffer = ""
+        self._in_write = False  # Recursion guard
+
+    def write(self, message: str) -> None:
+        """
+        Write a message to the logger.
+
+        Args:
+            message: The message to write
+        """
+        # Prevent infinite recursion if logging tries to write to stdout/stderr
+        if self._in_write:
+            return
+
+        self._in_write = True
+        try:
+            # Buffer the message until we get a complete line
+            self.buffer += message
+            if message.endswith("\n"):
+                # Log the complete line (minus the newline)
+                line = self.buffer.rstrip("\n")
+                if line:  # Only log non-empty lines
+                    self.logger.log(self.level, line)
+                self.buffer = ""
+        finally:
+            self._in_write = False
+
+    def flush(self) -> None:
+        """Flush any remaining buffer content."""
+        # Prevent infinite recursion if logging tries to write to stdout/stderr
+        if self._in_write:
+            return
+
+        self._in_write = True
+        try:
+            if self.buffer:
+                self.logger.log(self.level, self.buffer)
+                self.buffer = ""
+        finally:
+            self._in_write = False
+
+
+@contextmanager
+def redirect_stdout_stderr(
+    logger: logging.Logger,
+    stdout_level: int = logging.INFO,
+    stderr_level: int = logging.ERROR,
+) -> Generator[None, None, None]:
+    """
+    Context manager for safely redirecting stdout/stderr to logger.
+
+    This provides a safer alternative to global redirection.
+
+    Args:
+        logger: The logger instance to write to
+        stdout_level: Logging level for stdout messages
+        stderr_level: Logging level for stderr messages
+
+    Example:
+        with redirect_stdout_stderr(logger):
+            print("This will be logged instead of printed")
+    """
+    original_stdout = sys.stdout
+    original_stderr = sys.stderr
+
+    try:
+        sys.stdout = LoggerWriter(logger, stdout_level)
+        sys.stderr = LoggerWriter(logger, stderr_level)
+        yield
+    finally:
+        sys.stdout = original_stdout
+        sys.stderr = original_stderr
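A hedged usage sketch for the context manager above, capturing stray print() output from a noisy callee as log records (the logger name and callee are illustrative):

```python
import logging
from app.utils.logger_writer import redirect_stdout_stderr

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("sync-microservice")

def chatty_dependency():
    print("progress: 50%")   # would normally bypass logging entirely

with redirect_stdout_stderr(logger):
    chatty_dependency()      # emitted as an INFO record instead of raw stdout
```

The `_in_write` recursion guard exists because a logging handler that itself writes to sys.stdout would otherwise re-enter LoggerWriter.write and loop forever.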
diff --git a/sync-microservice/app/utils/watcher.py b/sync-microservice/app/utils/watcher.py
index a4915d70c..902a50cf5 100644
--- a/sync-microservice/app/utils/watcher.py
+++ b/sync-microservice/app/utils/watcher.py
@@ -1,11 +1,23 @@
 import os
 import threading
 import time
+import logging
 from typing import List, Tuple, Dict, Optional
 from watchfiles import watch, Change
 import httpx
 from app.database.folders import db_get_all_folders_with_ids
 from app.config.settings import PRIMARY_BACKEND_URL
+from app.logging.setup_logging import get_sync_logger
+
+# Configure third-party loggers
+logging.getLogger("httpx").setLevel(logging.WARNING)
+logging.getLogger("httpcore").setLevel(logging.WARNING)
+logging.getLogger("watchfiles").setLevel(logging.WARNING)  # Silence watchfiles logger
+logging.getLogger("watchfiles.main").setLevel(
+    logging.WARNING
+)  # Silence watchfiles.main logger
+
+logger = get_sync_logger(__name__)
 
 FolderIdPath = Tuple[str, str]
@@ -46,38 +58,32 @@ def watcher_util_handle_file_changes(changes: set) -> None:
     """
     deleted_folder_ids = []
 
-    for change, file_path in changes:
-        print(f"File change detected: {change} - {file_path}")
+    affected_folders = {}  # folder_path -> folder_id mapping
 
-        # Check if this is a deleted folder that we're watching
-        is_deleted_watched_folder = False
+    # First pass - count changes and identify affected folders
+    for change, file_path in changes:
+        # Process deletions
         if change == Change.deleted:
             deleted_folder_id = watcher_util_get_folder_id_if_watched(file_path)
             if deleted_folder_id:
-                print(
-                    f"  Watched folder deleted: {file_path} (ID: {deleted_folder_id})"
-                )
                 deleted_folder_ids.append(deleted_folder_id)
-                is_deleted_watched_folder = True
+                continue
 
-        # Execute for additions, modifications, and also for deleted image files within watched folders
-        # (ensuring image deletions trigger a sync of their parent folders)
-        if not is_deleted_watched_folder:
-            closest_folder = watcher_util_find_closest_parent_folder(
-                file_path, watched_folders
-            )
+        # Find affected folder
+        closest_folder = watcher_util_find_closest_parent_folder(
+            file_path, watched_folders
+        )
+        if closest_folder:
+            folder_id, folder_path = closest_folder
+            affected_folders[folder_path] = folder_id
 
-            if closest_folder:
-                folder_id, folder_path = closest_folder
-                print(f"  Closest parent folder: {folder_path} (ID: {folder_id})")
+    # Process affected folders
+    for folder_path, folder_id in affected_folders.items():
+        watcher_util_call_sync_folder_api(folder_id, folder_path)
 
-                watcher_util_call_sync_folder_api(folder_id, folder_path)
-            else:
-                print(f"  No watched parent folder found for: {file_path}")
-
-    # If any watched folders were deleted, call the delete API
+    # Handle deleted folders
     if deleted_folder_ids:
-        print(f"Calling delete API for {len(deleted_folder_ids)} deleted folders")
+        logger.info(f"Processing {len(deleted_folder_ids)} deleted folders")
         watcher_util_call_delete_folders_api(deleted_folder_ids)
         watcher_util_restart_folder_watcher()
@@ -113,7 +119,6 @@ def watcher_util_find_closest_parent_folder(
             if len(folder_path) > longest_match_length:
                 longest_match_length = len(folder_path)
                 best_match = (folder_id, folder_path)
-                print("best match: ", best_match)
 
     return best_match
@@ -134,16 +139,18 @@ def watcher_util_call_sync_folder_api(folder_id: str, folder_path: str) -> None:
             response = client.request("POST", url, json=payload)
 
             if response.status_code == 200:
-                print(f"Successfully synced folder {folder_path} (ID: {folder_id})")
+                logger.info(
+                    f"Successfully synced folder {folder_path} (ID: {folder_id})"
+                )
             else:
-                print(
+                logger.error(
                     f"Failed to sync folder {folder_path}. Status: {response.status_code}, Response: {response.text}"
                 )
 
     except httpx.RequestError as e:
-        print(f"Network error while syncing folder {folder_path}: {e}")
+        logger.error(f"Network error while syncing folder {folder_path}: {e}")
     except Exception as e:
-        print(f"Unexpected error while syncing folder {folder_path}: {e}")
+        logger.error(f"Unexpected error while syncing folder {folder_path}: {e}")
 
 
 def watcher_util_call_delete_folders_api(folder_ids: List[str]) -> None:
@@ -161,16 +168,16 @@ def watcher_util_call_delete_folders_api(folder_ids: List[str]) -> None:
             response = client.request("DELETE", url, json=payload)
 
             if response.status_code == 200:
-                print(f"Successfully deleted folders with IDs: {folder_ids}")
+                logger.info(f"Successfully deleted folders with IDs: {folder_ids}")
             else:
-                print(
+                logger.error(
                     f"Failed to delete folders. Status: {response.status_code}, Response: {response.text}"
                 )
 
     except httpx.RequestError as e:
-        print(f"Network error while deleting folders {folder_ids}: {e}")
+        logger.error(f"Network error while deleting folders {folder_ids}: {e}")
     except Exception as e:
-        print(f"Unexpected error while deleting folders {folder_ids}: {e}")
+        logger.error(f"Unexpected error while deleting folders {folder_ids}: {e}")
 
 
 def watcher_util_watcher_worker(folder_paths: List[str]) -> None:
@@ -181,16 +188,28 @@ def watcher_util_watcher_worker(folder_paths: List[str]) -> None:
         folder_paths: List of folder paths to watch
     """
     try:
-        print(f"Starting watcher for {len(folder_paths)} folders")
-        for changes in watch(*folder_paths, stop_event=stop_event, recursive=False):
+        logger.info(f"Starting watcher for {len(folder_paths)} folders")
+
+        logger.debug(f"Watching folders: {folder_paths}")
+        for changes in watch(
+            *folder_paths, stop_event=stop_event, recursive=True, debug=False
+        ):
             if stop_event.is_set():
-                print("Stop event detected in watcher loop")
+                logger.info("Stop event detected in watcher loop")
                 break
+
+            # Log changes at debug level before processing
+            if logger.isEnabledFor(logging.DEBUG):
+                from app.utils.watcher_helpers import format_debug_changes
+
+                logger.debug("Detailed changes:\n %s", format_debug_changes(changes))
+
+            # Process changes
             watcher_util_handle_file_changes(changes)
     except Exception as e:
-        print(f"Error in watcher worker: {e}")
+        logger.error(f"Error in watcher worker: {e}")
     finally:
-        print("Watcher stopped")
+        logger.info("Watcher stopped")
 
 
 def watcher_util_get_existing_folders(
@@ -210,7 +229,7 @@ def watcher_util_get_existing_folders(
         if os.path.exists(folder_path) and os.path.isdir(folder_path):
             existing_folders.append((folder_id, folder_path))
         else:
-            print(f"Warning: Folder does not exist: {folder_path}")
+            logger.warning(f"Folder does not exist: {folder_path}")
 
     return existing_folders
@@ -229,24 +248,25 @@ def watcher_util_start_folder_watcher() -> bool:
     global watcher_thread, watched_folders, folder_id_map
 
     if watcher_util_is_watcher_running():
-        print("Watcher is already running.")
+        logger.info("Watcher is already running.")
         return False
 
-    print("Initializing folder watcher...")
+    logger.info("Initializing folder watcher...")
+    logger.debug("Debug logging is enabled")
 
     try:
         # Simple synchronous database call
         folders = db_get_all_folders_with_ids()
 
         if not folders:
-            print("No folders found in database")
+            logger.info("No folders found in database")
             return False
 
-        print(f"Found {len(folders)} folders in database")
+        logger.info(f"Found {len(folders)} folders in database")
 
         # Simple synchronous file system checks
         existing_folders = watcher_util_get_existing_folders(folders)
 
         if not existing_folders:
-            print("No existing folders to watch")
+            logger.info("No existing folders to watch")
             return False
 
         watched_folders = existing_folders
 
         folder_paths = [folder_path for _, folder_path in existing_folders]
 
-        print(f"Starting to watch {len(folder_paths)} folders:")
+        logger.info(f"Starting to watch {len(folder_paths)} folders:")
         for folder_id, folder_path in existing_folders:
-            print(f" - {folder_path} (ID: {folder_id})")
+            logger.info(f" - {folder_path} (ID: {folder_id})")
 
         # Reset stop event and start background thread
         stop_event.clear()
@@ -269,11 +289,11 @@ def watcher_util_start_folder_watcher() -> bool:
         )
         watcher_thread.start()
 
-        print("Folder watcher started successfully")
+        logger.info("Folder watcher started successfully")
         return True
 
     except Exception as e:
-        print(f"Error starting folder watcher: {e}")
+        logger.error(f"Error starting folder watcher: {e}")
         return False
@@ -282,11 +302,11 @@ def watcher_util_stop_folder_watcher() -> None:
     global watcher_thread, watched_folders, folder_id_map
 
     if not watcher_util_is_watcher_running():
-        print("Watcher is not running")
+        logger.info("Watcher is not running")
         return
 
     try:
-        print("Stopping folder watcher...")
+        logger.info("Stopping folder watcher...")
 
         # Signal the watcher to stop
         stop_event.set()
@@ -295,12 +315,12 @@ def watcher_util_stop_folder_watcher() -> None:
         watcher_thread.join(timeout=5.0)
 
         if watcher_thread.is_alive():
-            print("Warning: Watcher thread did not stop gracefully")
+            logger.warning("Watcher thread did not stop gracefully")
         else:
-            print("Watcher stopped successfully")
+            logger.info("Watcher stopped successfully")
 
     except Exception as e:
-        print(f"Error stopping watcher: {e}")
+        logger.error(f"Error stopping watcher: {e}")
     finally:
         watcher_thread = None
         # Clear state
@@ -315,7 +335,7 @@ def watcher_util_restart_folder_watcher() -> bool:
     Returns:
         True if restart was successful, False otherwise
     """
-    print("Restarting folder watcher...")
+    logger.info("Restarting folder watcher...")
     watcher_util_stop_folder_watcher()
     return watcher_util_start_folder_watcher()
@@ -342,27 +362,27 @@ def watcher_util_wait_for_watcher() -> None:
         try:
             watcher_thread.join()  # Wait indefinitely
         except KeyboardInterrupt:
-            print("Interrupted by user")
+            logger.info("Interrupted by user")
             watcher_util_stop_folder_watcher()
     else:
-        print("No watcher thread to wait for")
+        logger.info("No watcher thread to wait for")
 
 
 # Simple usage examples
 def main():
     """Simple example of how to use the folder watcher."""
-    print("Starting folder watcher example...")
+    logger.info("Starting folder watcher example...")
 
     success = watcher_util_start_folder_watcher()
 
     if success:
-        print("Watcher started, will run for 10 seconds...")
+        logger.info("Watcher started, will run for 10 seconds...")
         try:
             time.sleep(10)  # Just sleep - no async complexity!
         except KeyboardInterrupt:
-            print("Interrupted by user")
+            logger.info("Interrupted by user")
         finally:
            watcher_util_stop_folder_watcher()
    else:
-        print("Failed to start watcher")
+        logger.error("Failed to start watcher")
 
-    print("Example finished")
+    logger.info("Example finished")
diff --git a/sync-microservice/app/utils/watcher_helpers.py b/sync-microservice/app/utils/watcher_helpers.py
new file mode 100644
index 000000000..9f0b7f084
--- /dev/null
+++ b/sync-microservice/app/utils/watcher_helpers.py
@@ -0,0 +1,33 @@
+from typing import Set, Tuple
+from watchfiles import Change
+
+
+def format_debug_changes(changes: Set[Tuple[Change, str]]) -> str:
+    """
+    Format file changes into a readable format.
+
+    Args:
+        changes: Set of (change_type, file_path) tuples from watchfiles
+
+    Returns:
+        Formatted string of change messages
+    """
+    if not changes:
+        return ""
+    debug_changes = []
+    try:
+        for change, path in sorted(
+            changes, key=lambda x: x[1] if x[1] is not None else ""
+        ):
+            change_type = (
+                "deleted"
+                if change == Change.deleted
+                else "modified"
+                if change == Change.modified
+                else "added"
+            )
+            debug_changes.append(f" - {change_type}: {path}")
+        indented = "\n".join(" " + line for line in debug_changes)
+        return indented
+    except Exception as e:
+        return f"Error formatting changes: {str(e)}"
diff --git a/sync-microservice/main.py b/sync-microservice/main.py
index db62c605d..449ac5572 100644
--- a/sync-microservice/main.py
+++ b/sync-microservice/main.py
@@ -1,8 +1,26 @@
+import logging
 from fastapi import FastAPI
 from uvicorn import Config, Server
 from app.core.lifespan import lifespan
 from app.routes import health, watcher, folders
 from fastapi.middleware.cors import CORSMiddleware
+from app.logging.setup_logging import (
+    get_sync_logger,
+    configure_uvicorn_logging,
+    setup_logging,
+)
+from app.utils.logger_writer import redirect_stdout_stderr
+
+# Set up standard logging
+setup_logging("sync-microservice")
+
+# Configure Uvicorn logging to use our custom formatter
+configure_uvicorn_logging("sync-microservice")
+
+# Use the sync-specific logger for this module
+logger = get_sync_logger(__name__)
+
+logger.info("Starting PictoPy Sync Microservice...")
 
 # Create FastAPI app with lifespan management
 app = FastAPI(
@@ -24,6 +42,20 @@
 app.include_router(folders.router, prefix="/api/v1")
 
 if __name__ == "__main__":
-    config = Config(app=app, host="0.0.0.0", port=8001, log_level="info")
+    logger.info("Starting PictoPy Sync Microservice from main...")
+
+    # Create config with log_config=None to disable Uvicorn's default logging
+    config = Config(
+        app=app,
+        host="0.0.0.0",
+        port=8001,
+        log_level="info",
+        log_config=None,  # Disable uvicorn's default logging config
+    )
     server = Server(config)
-    server.run()
+
+    # Use context manager for safe stdout/stderr redirection
+    with redirect_stdout_stderr(
+        logger, stdout_level=logging.INFO, stderr_level=logging.ERROR
+    ):
+        server.run()
diff --git a/sync-microservice/requirements.txt b/sync-microservice/requirements.txt
index df5e00068..857a60b2f 100644
--- a/sync-microservice/requirements.txt
+++ b/sync-microservice/requirements.txt
@@ -35,4 +35,5 @@ typing-inspection==0.4.1
 urllib3==2.5.0
 uvicorn==0.35.0
 watchfiles==1.1.0
-websockets==15.0.1
\ No newline at end of file
+websockets==15.0.1
+loguru==0.7.3
\ No newline at end of file
diff --git a/utils/logging/__init__.py b/utils/logging/__init__.py
new file mode 100644
index 000000000..1def09667
--- /dev/null
+++ b/utils/logging/__init__.py
@@ -0,0 +1,9 @@
+"""
+__init__.py for the utils.logging package.
+
+This file allows the package to be imported.
+"""
+
+from .core import setup_logging, get_logger, configure_uvicorn_logging
+
+__all__ = ["setup_logging", "get_logger", "configure_uvicorn_logging"]
diff --git a/utils/logging/logging_config.json b/utils/logging/logging_config.json
new file mode 100644
index 000000000..1a2d893f9
--- /dev/null
+++ b/utils/logging/logging_config.json
@@ -0,0 +1,50 @@
+{
+    "version": 1,
+    "formatters": {
+        "default": {
+            "format": "[%(component)s] | %(levelname)s | %(message)s"
+        }
+    },
+    "components": {
+        "backend": {
+            "prefix": "BACKEND",
+            "color": "blue"
+        },
+        "sync-microservice": {
+            "prefix": "SYNC-MICROSERVICE",
+            "color": "magenta"
+        },
+        "watchfiles": {
+            "prefix": "WATCHFILES",
+            "color": "cyan"
+        }
+    },
+    "colors": {
+        "DEBUG": "cyan",
+        "INFO": "green",
+        "WARNING": "yellow",
+        "ERROR": "red",
+        "CRITICAL": "red,bg_white"
+    },
+    "environments": {
+        "development": {
+            "level": "DEBUG",
+            "colored_output": true,
+            "console_logging": true,
+            "loggers": {
+                "watchfiles": {
+                    "level": "INFO"
+                },
+                "watchfiles.main": {
+                    "level": "INFO"
+                }
+            }
+        },
+        "production": {
+            "level": "INFO",
+            "colored_output": false,
+            "console_logging": true
+        }
+    },
+    "default_environment": "development"
+}
\ No newline at end of file