diff --git a/.env.example b/.env.example
index 3d1146c..018d2cd 100644
--- a/.env.example
+++ b/.env.example
@@ -3,13 +3,15 @@ PORT=8000
 ADDON_ID=com.bimal.watchly
 ADDON_NAME=Watchly
 REDIS_URL=redis://redis:6379/0
-TOKEN_SALT=replace-with-a-long-random-string
 TOKEN_TTL_SECONDS=0
 ANNOUNCEMENT_HTML=
 HOST_NAME=
 RECOMMENDATION_SOURCE_ITEMS_LIMIT=10 # fetches recent watched/loved 10 movies and series to recommend based on those
-
+TOKEN_SALT=change-me
+# generate a very long random string, preferably using a cryptography library
 # UPDATER
-CATALOG_UPDATE_MODE=cron
+CATALOG_UPDATE_MODE=cron # Available options: cron, interval
+# cron updates catalogs at specified times
+# interval updates catalogs at a fixed interval
 CATALOG_UPDATE_CRON_SCHEDULES=[{"hour": 12, "minute": 0, "id": "catalog_refresh_noon"},{"hour": 0, "minute": 0, "id": "catalog_refresh_midnight"}]
 CATALOG_REFRESH_INTERVAL_SECONDS=6*60*60
diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
index 1b5c6c7..d881463 100644
--- a/.github/FUNDING.yml
+++ b/.github/FUNDING.yml
@@ -1,2 +1,3 @@
+custom: ["https://buymemomo.com/timilsinabimal"]
 ko_fi: TimilsinaBimal
-custom: ["https://www.paypal.com/donate/?hosted_button_id=KRQMVS34FC5KC"]
+github: ["TimilsinaBimal"]
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index f699f9b..1394663 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -8,6 +8,9 @@ on:
   push:
     branches:
       - main
+    paths:
+      - 'app/core/version.py'
+      - 'pyproject.toml'
 
 concurrency:
   group: ${{ github.head_ref || github.run_id }}
@@ -18,7 +21,7 @@ jobs:
     runs-on: ubuntu-latest
     permissions:
       id-token: write
-      contents: read
+      contents: write
 
     steps:
       - name: Checkout code
@@ -30,14 +33,50 @@
         username: ${{ github.actor }}
         password: ${{ secrets.CR_TOKEN }}
 
+      - name: Set up Python
+        uses: actions/setup-python@v6
+        with:
+          python-version: '3.11'
+
+      - name: Read version from version.py
+        id: get-version
+        run: |
+          # Try Python import first
+          VERSION=$(python -c "import sys; sys.path.insert(0, '.'); from app.core.version import __version__; print(__version__)" 2>/dev/null || echo "")
+          # Fallback to regex if import fails
+          if [ -z "${VERSION}" ]; then
+            VERSION=$(grep -oP '__version__\s*=\s*"\K[^"]*' app/core/version.py || echo "0.0.0")
+          fi
+          echo "VERSION=${VERSION}" >> $GITHUB_OUTPUT
+          echo "Read version: ${VERSION}"
+
       - name: Set Docker image tag
         id: set-tag
         run: |
-          echo "IMAGE_TAG=${GITHUB_SHA::7}" >> $GITHUB_OUTPUT
+          VERSION="${{ steps.get-version.outputs.VERSION }}"
+          echo "IMAGE_TAG=${VERSION}" >> $GITHUB_OUTPUT
+          echo "Building Docker image with version: ${VERSION}"
 
       - name: Build and Push Docker image
         working-directory: "./"
         run: |
           REPO_NAME="${GITHUB_REPOSITORY,,}"
-          docker build -t ghcr.io/${REPO_NAME}:${{ steps.set-tag.outputs.IMAGE_TAG }} .
-          docker push ghcr.io/${REPO_NAME}:${{ steps.set-tag.outputs.IMAGE_TAG }}
+          IMAGE_TAG="${{ steps.set-tag.outputs.IMAGE_TAG }}"
+          # Build and tag with version
+          docker build -t ghcr.io/${REPO_NAME}:${IMAGE_TAG} .
+ docker push ghcr.io/${REPO_NAME}:${IMAGE_TAG} + # Also tag as latest + docker tag ghcr.io/${REPO_NAME}:${IMAGE_TAG} ghcr.io/${REPO_NAME}:latest + docker push ghcr.io/${REPO_NAME}:latest + + - name: Create and Push Git Tag + run: | + VERSION="${{ steps.get-version.outputs.VERSION }}" + # Check if tag already exists + if git rev-parse "$VERSION" >/dev/null 2>&1; then + echo "Tag $VERSION already exists, skipping" + else + echo "Creating tag: $VERSION" + git tag $VERSION + git push origin $VERSION + fi diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..7340975 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,98 @@ +name: Create GitHub Release + +on: + workflow_run: + workflows: ["Build and Push Docker Image"] + types: + - completed + branches: + - main + push: + tags: + - '*' # Also trigger on manual tag pushes + + +jobs: + release: + runs-on: ubuntu-latest + # Only run if the triggering workflow succeeded + if: ${{ github.event_name == 'push' || github.event.workflow_run.conclusion == 'success' }} + + permissions: + packages: write + contents: write + + steps: + - name: Checkout repository + uses: actions/checkout@v5 + with: + fetch-depth: 0 # Fetch all history for all tags and branches + fetch-tags: true # Fetch all tags + ref: ${{ github.event.workflow_run.head_branch || github.ref }} + + - name: Set up Python + uses: actions/setup-python@v6 + with: + python-version: '3.11' + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install openai pydantic + + - name: Get current tag + id: get-tag + run: | + # If triggered by workflow_run, read version from version.py + if [ "${{ github.event_name }}" = "workflow_run" ]; then + # Read version from version.py (same as CI workflow did) + VERSION=$(grep -oP '__version__\s*=\s*"\K[^"]*' app/core/version.py || echo "") + if [ -z "${VERSION}" ]; then + # Fallback: try Python import + VERSION=$(python -c "import sys; sys.path.insert(0, '.'); from app.core.version import __version__; print(__version__)" 2>/dev/null || echo "") + fi + if [ -z "${VERSION}" ]; then + echo "Error: Could not read version from version.py" + exit 1 + fi + echo "TAG_NAME=${VERSION}" >> $GITHUB_OUTPUT + echo "Tag from version.py: ${VERSION}" + else + # If triggered by tag push, get from GITHUB_REF + TAG_NAME=${GITHUB_REF#refs/tags/} + echo "TAG_NAME=${TAG_NAME}" >> $GITHUB_OUTPUT + echo "Current tag from push: ${TAG_NAME}" + fi + + - name: Checkout tag commit + run: | + TAG_NAME="${{ steps.get-tag.outputs.TAG_NAME }}" + git checkout ${TAG_NAME} || git checkout -b temp-${TAG_NAME} ${TAG_NAME} + echo "Checked out tag: ${TAG_NAME}" + + - name: Run Python script to generate release notes + id: generate_release_notes + env: + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + CURRENT_TAG: ${{ steps.get-tag.outputs.TAG_NAME }} + run: | + echo "Running generate_release_notes.py" + python scripts/generate_release_notes.py + echo "Script completed" + + - name: Debug Outputs + run: | + echo "Version: ${{ steps.generate_release_notes.outputs.version }}" + echo "Release Notes: ${{ steps.generate_release_notes.outputs.release_notes }}" + + - name: Create GitHub Release + uses: actions/create-release@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + tag_name: ${{ steps.get-tag.outputs.TAG_NAME }} + release_name: Release ${{ steps.get-tag.outputs.TAG_NAME }} - ${{ steps.generate_release_notes.outputs.version_name }} + body: ${{ 
steps.generate_release_notes.outputs.release_notes }} + draft: false + prerelease: ${{ contains(steps.get-tag.outputs.TAG_NAME, 'beta') || contains(steps.get-tag.outputs.TAG_NAME, 'alpha') || contains(steps.get-tag.outputs.TAG_NAME, 'rc') || contains(steps.get-tag.outputs.TAG_NAME, 'pre') }} diff --git a/.gitignore b/.gitignore index d4ba679..458649e 100644 --- a/.gitignore +++ b/.gitignore @@ -42,3 +42,7 @@ Thumbs.db # Logs logs/ *.log + +# python notebooks +*/ipynb_checkpoints/ +*.ipynb diff --git a/Dockerfile b/Dockerfile index d70e3bd..eda8c5e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,30 +1,26 @@ -# Use Python 3.11 slim image as base FROM python:3.11-slim -# Set working directory WORKDIR /app -# Set environment variables -ENV PYTHONDONTWRITEBYTECODE=1 \ - PYTHONUNBUFFERED=1 \ - PIP_NO_CACHE_DIR=1 \ - PIP_DISABLE_PIP_VERSION_CHECK=1 - # Install system dependencies RUN apt-get update && apt-get install -y \ - gcc \ + gcc curl ca-certificates\ && rm -rf /var/lib/apt/lists/* -# Copy requirements first for better caching -COPY requirements.txt . +# Download the latest installer +ADD https://astral.sh/uv/install.sh /uv-installer.sh + +# Run the installer then remove it +RUN sh /uv-installer.sh && rm /uv-installer.sh -# Install Python dependencies -RUN pip install --no-cache-dir -r requirements.txt +# Ensure the installed binary is on the `PATH` +ENV PATH="/root/.local/bin/:$PATH" -# Copy application code (including static files) COPY app/ ./app/ -COPY static/ ./static/ COPY main.py . COPY pyproject.toml . +COPY uv.lock . + +RUN uv sync --locked -ENTRYPOINT ["python", "main.py"] +ENTRYPOINT ["uv", "run", "main.py"] diff --git a/README.md b/README.md index e50bb76..92cdf48 100644 --- a/README.md +++ b/README.md @@ -1,250 +1,118 @@ # Watchly -[![ko-fi](https://ko-fi.com/img/githubbutton_sm.svg)](https://ko-fi.com/I2I81OVJEH) -[![PayPal](https://img.shields.io/badge/PayPal-00457C?style=for-the-badge&logo=paypal&logoColor=white)](https://www.paypal.com/donate/?hosted_button_id=KRQMVS34FC5KC) - **Watchly** is a Stremio catalog addon that provides personalized movie and series recommendations based on your Stremio library. It uses The Movie Database (TMDB) API to generate intelligent recommendations from the content you've watched and loved. -## What is Watchly? - -Watchly is a FastAPI-based Stremio addon that: - -- **Personalizes Recommendations**: Analyzes your Stremio library to understand your viewing preferences -- **Uses Your Loved Content**: Generates recommendations based on movies and series you've marked as "loved" in Stremio -- **Filters Watched Content**: Automatically excludes content you've already watched -- **Supports Movies & Series**: Provides recommendations for both movies and TV series -- **Genre-Based Discovery**: Offers genre-specific catalogs based on your viewing history -- **Similar Content**: Shows recommendations similar to specific titles when browsing - -## What Does It Do? - -1. **Connects to Your Stremio Library**: Securely authenticates with your Stremio account to access your library -2. **Analyzes Your Preferences**: Identifies your most loved movies and series as seed content -3. **Generates Recommendations**: Uses TMDB's recommendation engine to find similar content -4. **Filters & Scores**: Removes watched content and scores recommendations based on relevance -5. 
**Provides Stremio Catalogs**: Exposes catalogs that appear in your Stremio app for easy browsing - ## Features -- ✅ **Personalized Recommendations** based on your Stremio library -- ✅ **Library-Based Filtering** - excludes content you've already watched -- ✅ **IMDB ID Support** - uses standard IMDB identifiers (Stremio standard) -- ✅ **Movies & Series Support** - recommendations for both content types -- ✅ **Genre-Based Catalogs** - dynamic genre catalogs based on your preferences -- ✅ **Similar Content Discovery** - find content similar to specific titles -- ✅ **Web Configuration Interface** - easy setup through a web UI -- ✅ **Caching** - optimized performance with intelligent caching -- ✅ **Secure Tokenized Access** - credentials/auth keys never travel in URLs -- ✅ **Docker Support** - easy deployment with Docker and Docker Compose -- ✅ **Background Catalog Refresh** - automatically keeps Stremio catalogs in sync -- ✅ **Credential Validation** - verifies access details and primes catalogs before issuing tokens +- **Personalized Recommendations**: Analyzes your Stremio library to understand your viewing preferences. +- **Smart Filtering**: Automatically excludes content you've already watched. +- **Advanced Scoring**: Recommendations are intelligently weighted by recency and relevance. +- **Genre-Based Discovery**: Offers genre-specific catalogs based on your viewing history. +- **Similar Content**: Discover content similar to specific titles in your library. +- **Web Configuration**: Easy-to-use web interface for secure setup. +- **Secure Architecture**: Credentials are stored securely and never exposed in URLs. +- **Background Sync**: Keeps your catalogs updated automatically in the background. +- **Performance Optimized**: Intelligent caching for fast and reliable responses. ## Installation -### Prerequisites - -- Python 3.10 or higher -- TMDB API key ([Get one here](https://www.themoviedb.org/settings/api)) -- Stremio account credentials (username/email and password) - -### Option 1: Docker Installation (Recommended) - -#### Using Docker Compose - -1. **Clone the repository:** - ```bash - git clone https://github.com/TimilsinaBimal/Watchly.git - cd Watchly - ``` - -2. **Create a `.env` file:** - ```bash - cp .env.example .env - # Edit .env and add your credentials - ``` - -3. **Edit `.env` file with your credentials:** - ``` - TMDB_API_KEY=your_tmdb_api_key_here - PORT=8000 - ... - ``` - -4. **Start the application:** - ```bash - docker-compose up -d - ``` - -5. **Access the application:** - - API: `http://localhost:8000` - - Configuration page: `http://localhost:8000/configure` - - API Documentation: `http://localhost:8000/docs` - - -### Option 2: Manual Installation - -1. **Clone the repository:** - ```bash - git clone https://github.com/TimilsinaBimal/Watchly.git - cd Watchly - ``` - -2. **Set environment variables:** - Create a `.env` file in the project root: - ``` - TMDB_API_KEY=your_tmdb_api_key_here - PORT=8000 - ... - ``` - -3. **Install UV and Run app (recommended):** -- [Installation Instructions](https://docs.astral.sh/uv/getting-started/installation/) - ```bash - uv run main.py - ``` - -4. 
**Access the application:** - - API: `http://localhost:8000` - - Configuration page: `http://localhost:8000/configure` - - API Documentation: `http://localhost:8000/docs` - - -*You Can also create virtual environment and install dependencies from requirements.txt and run the app* - -## Configuration - -### Environment Variables - -| Variable | Description | Required | Default | -|----------|-------------|----------|---------| -| `TMDB_API_KEY` | Your TMDB API key | Required for catalog features (optional for `/health`) | *(empty)* | -| `PORT` | Server port | No | 8000 | -| `ADDON_ID` | Stremio addon identifier | No | com.bimal.watchly | -| `ADDON_NAME` | Human-friendly addon name shown in the manifest/UI | No | Watchly | -| `REDIS_URL` | Redis connection string for credential tokens | No | `redis://localhost:6379/0` | -| `TOKEN_SALT` | Secret salt for hashing token IDs | Yes | - (must be set in production) | -| `TOKEN_TTL_SECONDS` | Token lifetime in seconds (`0` = no expiry) | No | 0 | -| `ANNOUNCEMENT_HTML` | Optional HTML snippet rendered in the configurator banner | No | *(empty)* | -| `TMDB_ADDON_URL` | Base URL for the TMDB addon metadata proxy | No | `https://94c8cb9f702d-tmdb-addon.baby-beamup.club/...` | -| `AUTO_UPDATE_CATALOGS` | Enable periodic background catalog refreshes | No | `true` | -| `CATALOG_REFRESH_INTERVAL_SECONDS` | Interval between automatic refreshes (seconds) | No | `21600` (6h) | - -### User Configuration - -Use the web interface at `/configure` to provision a secure access token: - -1. Provide either your **Stremio username/password** *or* an **existing `authKey`** (copy from `localStorage.authKey` in [https://web.stremio.com/](https://web.stremio.com/)). -2. Choose whether to base recommendations on loved items only or include everything you've watched. -3. Watchly verifies the credentials/auth key with Stremio, performs the first catalog refresh in the background, and only then stores the payload inside Redis. -4. Your manifest URL becomes `https:////manifest.json`. Only this token ever appears in URLs. -5. Re-running the setup with the same credentials/configuration returns the exact same token. - -By default (`TOKEN_TTL_SECONDS=0`), tokens never expire. Set a positive TTL if you want automatic rotation. - -## How It Works - -1. **User Configuration**: User submits Stremio credentials or auth key via the web interface -2. **Secure Tokenization**: Credentials/auth keys are stored server-side in Redis; the user only receives a salted token -3. **Library Fetching**: When catalog is requested, service resolves the token, authenticates with Stremio, and fetches the library -4. **Seed Selection**: Uses most recent "loved" items (default: 10) as seed content -5. **Recommendation Generation**: For each seed, fetches recommendations from TMDB -6. **Filtering**: Removes items already in user's watched library -7. **Deduplication**: Combines recommendations from multiple seeds, scoring by relevance -8. **Metadata Fetching**: Fetches full metadata from TMDB addon -9. 
**Response**: Returns formatted catalog items compatible with Stremio - -## Project Structure - -``` -Watchly/ -├── app/ -│ ├── __init__.py # Package initialization -│ ├── core/ # Core application components -│ │ ├── __init__.py -│ │ ├── config.py # Application settings -│ │ └── app.py # FastAPI application initialization -│ ├── models/ # Pydantic models -│ │ ├── __init__.py -│ │ └── stremio.py # Stremio data models -│ ├── api/ # API routes -│ │ ├── main.py # API router -│ │ └── endpoints/ -│ │ ├── manifest.py # Stremio manifest endpoint -│ │ ├── catalogs.py # Catalog endpoints -│ │ ├── streams.py # Stream endpoints -│ │ └── caching.py # Cache management -│ ├── services/ # Business logic services -│ │ ├── tmdb_service.py # TMDB API integration -│ │ ├── stremio_service.py # Stremio API integration -│ │ ├── recommendation_service.py # Recommendation engine -│ │ └── catalog.py # Dynamic catalog service -│ └── utils.py # Utility functions -├── static/ # Static web files -│ ├── index.html # Configuration page -│ ├── style.css # Styling -│ ├── script.js # Configuration logic -│ └── logo.png # Addon logo -├── main.py # Application entry point (runs uvicorn) -├── requirements.txt # Python dependencies -├── Dockerfile # Docker image definition -├── docker-compose.yml # Docker Compose configuration -└── README.md # This file -``` +### Using Docker (Recommended) + +You can pull the latest image from the GitHub Container Registry. + +1. **Create a `docker-compose.yml` file:** + + ```yaml + services: + redis: + image: redis:7-alpine + container_name: watchly-redis + restart: unless-stopped + volumes: + - redis_data:/data + + watchly: + image: ghcr.io/timilsinabimal/watchly:latest + container_name: watchly + restart: unless-stopped + ports: + - "8000:8000" + env_file: + - .env + depends_on: + - redis + + volumes: + redis_data: + ``` + +2. **Create a `.env` file:** + + ```env + # Required + TMDB_API_KEY=your_tmdb_api_key_here + TOKEN_SALT=generate_a_random_secure_string_here + + # Optional + PORT=8000 + REDIS_URL=redis://redis:6379/0 + ADDON_ID=com.bimal.watchly + ADDON_NAME=Watchly + TOKEN_TTL_SECONDS=0 + AUTO_UPDATE_CATALOGS=true + ``` + +3. **Start the application:** + + ```bash + docker-compose up -d + ``` + +4. **Configure the addon:** + Open `http://localhost:8000/configure` in your browser to set up your Stremio credentials and install the addon. ## Development -### Running in Development Mode +To run the project locally: -```bash -uv run main.py --dev -``` +1. **Clone the repository:** + ```bash + git clone https://github.com/TimilsinaBimal/Watchly.git + cd Watchly + ``` -Or using Python directly (with auto-reload based on APP_ENV): -```bash -python main.py -``` +2. **Install dependencies:** + We recommend using [uv](https://github.com/astral-sh/uv) for fast dependency management. + ```bash + uv sync + ``` -### Health Check Endpoint +3. **Run the application:** + ```bash + uv run main.py --dev + ``` -The `/health` endpoint responds with `{ "status": "ok" }` without touching external services. This keeps container builds and probes green even when secrets like `TMDB_API_KEY` aren't supplied yet. - -### Background Catalog Updates - -Watchly now refreshes catalogs automatically using the credentials stored in Redis. By default the background worker runs every 6 hours and updates each token's catalogs directly via the Stremio API. To disable the behavior, set `AUTO_UPDATE_CATALOGS=false` (or choose a custom cadence with `CATALOG_REFRESH_INTERVAL_SECONDS`). 
Manual refreshes through `/{token}/catalog/update` continue to work and reuse the same logic. - -### Testing - -```bash -# Test manifest endpoint -curl http://localhost:8000/manifest.json - -# Test catalog endpoint (requires a credential token) -curl http://localhost:8000/{token}/catalog/movie/watchly.rec.json -``` - -## Security Notes +## Contributing -- **Tokenized URLs**: Manifest/catalog URLs now contain only salted tokens. Credentials/auth keys never leave the server once submitted. -- **Rotate `TOKEN_SALT`**: Treat the salt like any other secret; rotate if you suspect compromise. Changing the salt invalidates all tokens. -- **Redis Security**: Ensure your Redis instance is not exposed publicly and enable authentication if hosted remotely. -- **HTTPS Recommended**: Always use HTTPS in production to protect tokens in transit. -- **Environment Variables**: Never commit `.env` files or expose API keys in code. +We welcome contributions of all sizes! -## Troubleshooting +- **Small Bug Fixes & Improvements**: Feel free to open a Pull Request directly. +- **Major Features & Refactors**: Please **open an issue first** to discuss your proposed changes. This helps ensure your work aligns with the project's direction and saves you time. -### No recommendations appearing +## Funding & Support -- Ensure user has "loved" items in their Stremio library -- Check that TMDB API key has proper permissions -- Review application logs for errors +If you find Watchly useful, please consider supporting the project: +- [Buy me Mo:Mo](https://buymemomo.com/timilsinabimal) +- [Support on Ko-fi](https://ko-fi.com/I2I81OVJEH) +- [Donate via PayPal](https://www.paypal.com/donate/?hosted_button_id=KRQMVS34FC5KC) -## License +## Bug Reports -See [LICENSE](LICENSE) file for details. +Found a bug or have a feature request? Please [open an issue](https://github.com/TimilsinaBimal/Watchly/issues) on GitHub. -## Contributing +## Contributors -Contributions are welcome! Please feel free to submit a Pull Request. +Thank you to everyone who has contributed to the project! -## Support +## Acknowledgements -For issues and questions, please open an issue on GitHub. +Special thanks to **[The Movie Database (TMDB)](https://www.themoviedb.org/)** for providing the rich metadata that powers Watchly's recommendations. 
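A practical note on the `TOKEN_SALT` value shown in the `.env` example above: it only needs to be a long, unpredictable secret, and the project's `.env.example` suggests generating it with a cryptography library. A minimal sketch (assuming only that Python 3 is on your PATH, which the project already requires) using the standard-library `secrets` module:

```bash
# Prints a 64-byte URL-safe random string suitable for TOKEN_SALT
python -c "import secrets; print(secrets.token_urlsafe(64))"
```

Treat the output like any other secret: rotating `TOKEN_SALT` invalidates all previously issued tokens, so every user would need to reconfigure the addon afterwards.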
diff --git a/app/api/endpoints/announcement.py b/app/api/endpoints/announcement.py index 5f31df1..19bce86 100644 --- a/app/api/endpoints/announcement.py +++ b/app/api/endpoints/announcement.py @@ -1,6 +1,6 @@ from fastapi import APIRouter -from app.config import settings +from app.core.config import settings router = APIRouter(prefix="/announcement", tags=["announcement"]) diff --git a/app/api/endpoints/catalogs.py b/app/api/endpoints/catalogs.py index 5a8ed76..34761d4 100644 --- a/app/api/endpoints/catalogs.py +++ b/app/api/endpoints/catalogs.py @@ -1,90 +1,95 @@ +import re + from fastapi import APIRouter, HTTPException, Response from loguru import logger +from app.core.security import redact_token +from app.core.settings import UserSettings, get_default_settings from app.services.catalog_updater import refresh_catalogs_for_credentials from app.services.recommendation_service import RecommendationService from app.services.stremio_service import StremioService -from app.utils import redact_token, resolve_user_credentials +from app.services.token_store import token_store + +MAX_RESULTS = 50 +SOURCE_ITEMS_LIMIT = 15 router = APIRouter() -@router.get("/catalog/{type}/{id}.json") @router.get("/{token}/catalog/{type}/{id}.json") -async def get_catalog( - token: str | None, - type: str, - id: str, - response: Response, -): - """ - Stremio catalog endpoint for movies and series. - Returns recommendations based on user's Stremio library. - - Args: - token: Redis-backed credential token - type: 'movie' or 'series' - id: Catalog ID (e.g., 'watchly.rec') - """ +async def get_catalog(type: str, id: str, response: Response, token: str): if not token: raise HTTPException( status_code=400, detail="Missing credentials token. Please open Watchly from a configured manifest URL.", ) - logger.info(f"[{redact_token(token)}] Fetching catalog for {type} with id {id}") - - credentials = await resolve_user_credentials(token) - if type not in ["movie", "series"]: logger.warning(f"Invalid type: {type}") raise HTTPException(status_code=400, detail="Invalid type. Use 'movie' or 'series'") - if id not in ["watchly.rec"] and not id.startswith("tt") and not id.startswith("watchly.genre."): + # Supported IDs now include dynamic themes and item-based rows + if id != "watchly.rec" and not any( + id.startswith(p) for p in ("tt", "watchly.theme.", "watchly.item.", "watchly.loved.", "watchly.watched.") + ): logger.warning(f"Invalid id: {id}") raise HTTPException( status_code=400, - detail="Invalid id. Use 'watchly.rec' or 'watchly.genre.'", + detail=( # + "Invalid id. Supported: 'watchly.rec', 'watchly.theme.', 'watchly.item.', or" + " specific item IDs." + ), ) + + logger.info(f"[{redact_token(token)}] Fetching catalog for {type} with id {id}") + + credentials = await token_store.get_user_data(token) + if not credentials: + raise HTTPException(status_code=401, detail="Invalid or expired token. 
Please reconfigure the addon.") try: + # Extract settings from credentials + settings_dict = credentials.get("settings", {}) + user_settings = UserSettings(**settings_dict) if settings_dict else get_default_settings() + language = user_settings.language if user_settings else "en-US" + # Create services with credentials - stremio_service = StremioService( - username=credentials.get("username") or "", - password=credentials.get("password") or "", - auth_key=credentials.get("authKey"), + stremio_service = StremioService(auth_key=credentials.get("authKey")) + recommendation_service = RecommendationService( + stremio_service=stremio_service, language=language, user_settings=user_settings ) - recommendation_service = RecommendationService(stremio_service=stremio_service) - # if id starts with tt, then return recommendations for that particular item + # Handle item-based recommendations if id.startswith("tt"): recommendations = await recommendation_service.get_recommendations_for_item(item_id=id) logger.info(f"Found {len(recommendations)} recommendations for {id}") - elif id.startswith("watchly.genre."): - recommendations = await recommendation_service.get_recommendations_for_genre(genre_id=id, media_type=type) - logger.info(f"Found {len(recommendations)} recommendations for {id}") + + elif id.startswith("watchly.item.") or id.startswith("watchly.loved.") or id.startswith("watchly.watched."): + # Extract actual item ID (tt... or tmdb:...) + item_id = re.sub(r"^watchly\.(item|loved|watched)\.", "", id) + recommendations = await recommendation_service.get_recommendations_for_item(item_id=item_id) + logger.info(f"Found {len(recommendations)} recommendations for item {item_id}") + + elif id.startswith("watchly.theme."): + recommendations = await recommendation_service.get_recommendations_for_theme( + theme_id=id, content_type=type + ) + logger.info(f"Found {len(recommendations)} recommendations for theme {id}") + else: - # Get recommendations based on library - # Use config to determine if we should include watched items - include_watched = credentials.get("includeWatched", False) - # Use last 10 items as sources, get 5 recommendations per source item recommendations = await recommendation_service.get_recommendations( - content_type=type, - source_items_limit=10, - recommendations_per_source=5, - max_results=50, - include_watched=include_watched, + content_type=type, source_items_limit=SOURCE_ITEMS_LIMIT, max_results=MAX_RESULTS ) - logger.info(f"Found {len(recommendations)} recommendations for {type} (includeWatched: {include_watched})") + logger.info(f"Found {len(recommendations)} recommendations for {type}") logger.info(f"Returning {len(recommendations)} items for {type}") - # Cache catalog responses for 4 hours (14400 seconds) - response.headers["Cache-Control"] = "public, max-age=14400" + # Cache catalog responses for 4 hours + response.headers["Cache-Control"] = "public, max-age=14400" if len(recommendations) > 0 else "no-cache" return {"metas": recommendations} except HTTPException: raise except Exception as e: - logger.error(f"[{redact_token(token)}] Error fetching catalog for {type}/{id}: {e}", exc_info=True) + logger.exception(f"[{redact_token(token)}] Error fetching catalog for {type}/{id}: {e}") raise HTTPException(status_code=500, detail=str(e)) @@ -94,9 +99,9 @@ async def update_catalogs(token: str): Update the catalogs for the addon. This is a manual endpoint to update the catalogs. 
""" # Decode credentials from path - credentials = await resolve_user_credentials(token) + credentials = await token_store.get_user_data(token) logger.info(f"[{redact_token(token)}] Updating catalogs in response to manual request") - updated = await refresh_catalogs_for_credentials(credentials) + updated = await refresh_catalogs_for_credentials(token, credentials) logger.info(f"Manual catalog update completed: {updated}") return {"success": updated} diff --git a/app/api/endpoints/manifest.py b/app/api/endpoints/manifest.py index a84280b..6443787 100644 --- a/app/api/endpoints/manifest.py +++ b/app/api/endpoints/manifest.py @@ -1,67 +1,148 @@ from async_lru import alru_cache -from fastapi import Response +from fastapi import HTTPException, Response from fastapi.routing import APIRouter from app.core.config import settings +from app.core.settings import UserSettings, get_default_settings +from app.core.version import __version__ from app.services.catalog import DynamicCatalogService from app.services.stremio_service import StremioService -from app.utils import resolve_user_credentials +from app.services.token_store import token_store +from app.services.translation import translation_service router = APIRouter() -def get_base_manifest(): +def get_base_manifest(user_settings: UserSettings | None = None): + # Default catalog config + rec_config = None + if user_settings: + # Find config for 'recommended' + rec_config = next((c for c in user_settings.catalogs if c.id == "watchly.rec"), None) + + # If disabled explicitly, don't include it. + # If not configured (None), default to enabled. + if rec_config and not rec_config.enabled: + catalogs = [] + else: + name = rec_config.name if rec_config and rec_config.name else "Top Picks for You" + catalogs = [ + { + "type": "movie", + "id": "watchly.rec", + "name": name, + "extra": [], + }, + { + "type": "series", + "id": "watchly.rec", + "name": name, + "extra": [], + }, + ] + return { "id": settings.ADDON_ID, - "version": settings.APP_VERSION, + "version": __version__, "name": settings.ADDON_NAME, "description": "Movie and series recommendations based on your Stremio library", - "logo": "https://raw.githubusercontent.com/TimilsinaBimal/Watchly/refs/heads/main/static/logo.png", + "logo": "https://raw.githubusercontent.com/TimilsinaBimal/Watchly/refs/heads/main/app/static/logo.png", "resources": [{"name": "catalog", "types": ["movie", "series"], "idPrefixes": ["tt"]}], "types": ["movie", "series"], "idPrefixes": ["tt"], - "catalogs": [ - {"type": "movie", "id": "watchly.rec", "name": "Recommended", "extra": []}, - {"type": "series", "id": "watchly.rec", "name": "Recommended", "extra": []}, - ], + "catalogs": catalogs, "behaviorHints": {"configurable": True, "configurationRequired": False}, } # Cache catalog definitions for 1 hour (3600s) @alru_cache(maxsize=1000, ttl=3600) -async def fetch_catalogs(token: str | None = None): - if not token: - return [] - - credentials = await resolve_user_credentials(token) - stremio_service = StremioService( - username=credentials.get("username") or "", - password=credentials.get("password") or "", - auth_key=credentials.get("authKey"), - ) +async def fetch_catalogs(token: str): + credentials = await token_store.get_user_data(token) + if not credentials: + raise HTTPException(status_code=401, detail="Invalid or expired token. 
Please reconfigure the addon.") + + if credentials.get("settings"): + user_settings = UserSettings(**credentials["settings"]) + else: + user_settings = get_default_settings() + + stremio_service = StremioService(auth_key=credentials.get("authKey")) + # Note: get_library_items is expensive, but we need it to determine *which* genre catalogs to show. library_items = await stremio_service.get_library_items() dynamic_catalog_service = DynamicCatalogService(stremio_service=stremio_service) # Base catalogs are already in manifest, these are *extra* dynamic ones - catalogs = await dynamic_catalog_service.get_watched_loved_catalogs(library_items=library_items) - catalogs += await dynamic_catalog_service.get_genre_based_catalogs(library_items=library_items) + # Pass user_settings to filter/rename + catalogs = await dynamic_catalog_service.get_dynamic_catalogs(library_items, user_settings) return catalogs -@router.get("/manifest.json") -@router.get("/{token}/manifest.json") -async def manifest(response: Response, token: str | None = None): - """Stremio manifest endpoint with optional credential token in the path.""" - # Cache manifest for 1 day (86400 seconds) +def get_config_id(catalog) -> str | None: + catalog_id = catalog.get("id", "") + if catalog_id.startswith("watchly.theme."): + return "watchly.theme" + if catalog_id.startswith("watchly.loved."): + return "watchly.loved" + if catalog_id.startswith("watchly.watched."): + return "watchly.watched" + if catalog_id.startswith("watchly.item."): + return "watchly.item" + if catalog_id.startswith("watchly.rec"): + return "watchly.rec" + return catalog_id + + +async def _manifest_handler(response: Response, token: str): response.headers["Cache-Control"] = "public, max-age=86400" - base_manifest = get_base_manifest() - if token: - catalogs = await fetch_catalogs(token) - if catalogs: - # Append dynamic catalogs to the base ones - base_manifest["catalogs"] += catalogs + if not token: + raise HTTPException(status_code=401, detail="Missing token. Please reconfigure the addon.") + + user_settings = None + try: + creds = await token_store.get_user_data(token) + if creds.get("settings"): + user_settings = UserSettings(**creds["settings"]) + except Exception: + raise HTTPException(status_code=401, detail="Invalid or expired token. 
Please reconfigure the addon.") + + base_manifest = get_base_manifest(user_settings) + + fetched_catalogs = await fetch_catalogs(token) + + all_catalogs = [c.copy() for c in base_manifest["catalogs"]] + [c.copy() for c in fetched_catalogs] + + translated_catalogs = [] + + # translate to target language + if user_settings and user_settings.language: + for cat in all_catalogs: + if cat.get("name"): + cat["name"] = await translation_service.translate(cat["name"], user_settings.language) + translated_catalogs.append(cat) + else: + translated_catalogs = all_catalogs + + if user_settings: + order_map = {c.id: i for i, c in enumerate(user_settings.catalogs)} + translated_catalogs.sort(key=lambda x: order_map.get(get_config_id(x), 999)) + + base_manifest["catalogs"] = translated_catalogs + return base_manifest + + +@router.get("/manifest.json") +async def manifest(): + manifest = get_base_manifest() + # since user is not logged in, return empty catalogs + manifest["catalogs"] = [] + return manifest + + +@router.get("/{token}/manifest.json") +async def manifest_token(response: Response, token: str): + return await _manifest_handler(response, token) diff --git a/app/api/endpoints/meta.py b/app/api/endpoints/meta.py new file mode 100644 index 0000000..3387638 --- /dev/null +++ b/app/api/endpoints/meta.py @@ -0,0 +1,24 @@ +from fastapi import APIRouter, HTTPException +from loguru import logger + +from app.services.tmdb_service import TMDBService + +router = APIRouter() + + +@router.get("/api/languages") +async def get_languages(): + """ + Proxy endpoint to fetch languages from TMDB. + """ + tmdb_service = TMDBService() + try: + languages = await tmdb_service._make_request("/configuration/languages") + if not languages: + return [] + return languages + except Exception as e: + logger.error(f"Failed to fetch languages: {e}") + raise HTTPException(status_code=502, detail="Failed to fetch languages from TMDB") + finally: + await tmdb_service.close() diff --git a/app/api/endpoints/tokens.py b/app/api/endpoints/tokens.py index 7993752..53cdef5 100644 --- a/app/api/endpoints/tokens.py +++ b/app/api/endpoints/tokens.py @@ -5,22 +5,21 @@ from redis import exceptions as redis_exceptions from app.core.config import settings -from app.services.catalog_updater import refresh_catalogs_for_credentials +from app.core.security import redact_token +from app.core.settings import CatalogConfig, UserSettings, get_default_settings from app.services.stremio_service import StremioService from app.services.token_store import token_store -from app.utils import redact_token router = APIRouter(prefix="/tokens", tags=["tokens"]) class TokenRequest(BaseModel): - username: str | None = Field(default=None, description="Stremio username or email") - password: str | None = Field(default=None, description="Stremio password") - authKey: str | None = Field(default=None, description="Existing Stremio auth key") - includeWatched: bool = Field( - default=False, - description="If true, recommendations can include watched titles", - ) + authKey: str | None = Field(default=None, description="Stremio auth key") + catalogs: list[CatalogConfig] | None = Field(default=None, description="Optional catalog configuration") + language: str = Field(default="en-US", description="Language for TMDB API") + rpdb_key: str | None = Field(default=None, description="Optional RPDB API Key") + excluded_movie_genres: list[str] = Field(default_factory=list, description="List of movie genre IDs to exclude") + excluded_series_genres: list[str] = 
Field(default_factory=list, description="List of series genre IDs to exclude") class TokenResponse(BaseModel): @@ -34,18 +33,13 @@ class TokenResponse(BaseModel): async def _verify_credentials_or_raise(payload: dict) -> str: """Ensure the supplied credentials/auth key are valid before issuing tokens.""" - stremio_service = StremioService( - username=payload.get("username") or "", - password=payload.get("password") or "", - auth_key=payload.get("authKey"), - ) + stremio_service = StremioService(auth_key=payload.get("authKey")) try: - if payload.get("authKey") and not payload.get("username"): + if payload.get("authKey"): await stremio_service.get_addons(auth_key=payload["authKey"]) return payload["authKey"] - auth_key = await stremio_service.get_auth_key() - return auth_key + raise ValueError("Please Login using stremio account to continue!") except ValueError as exc: raise HTTPException( status_code=400, @@ -75,65 +69,66 @@ async def _verify_credentials_or_raise(payload: dict) -> str: @router.post("/", response_model=TokenResponse) async def create_token(payload: TokenRequest, request: Request) -> TokenResponse: - username = payload.username.strip() if payload.username else None - password = payload.password - auth_key = payload.authKey.strip() if payload.authKey else None - if auth_key and auth_key.startswith('"') and auth_key.endswith('"'): - auth_key = auth_key[1:-1].strip() + stremio_auth_key = payload.authKey.strip() if payload.authKey else None - if username and not password: - raise HTTPException(status_code=400, detail="Password is required when a username is provided.") + if not stremio_auth_key: + raise HTTPException(status_code=400, detail="Stremio auth key is required.") - if password and not username: - raise HTTPException( - status_code=400, - detail="Username/email is required when a password is provided.", - ) + # Remove quotes if present + if stremio_auth_key.startswith('"') and stremio_auth_key.endswith('"'): + stremio_auth_key = stremio_auth_key[1:-1].strip() - if not auth_key and not (username and password): - raise HTTPException( - status_code=400, - detail="Provide either a Stremio auth key or both username and password.", - ) + rpdb_key = payload.rpdb_key.strip() if payload.rpdb_key else None + + # 1. Fetch user info from Stremio (user_id and email) + stremio_service = StremioService(auth_key=stremio_auth_key) + try: + user_info = await stremio_service.get_user_info() + user_id = user_info["user_id"] + email = user_info.get("email", "") + except Exception as e: + raise HTTPException(status_code=400, detail=f"Failed to verify Stremio identity: {e}") + finally: + await stremio_service.close() + # 2. Check if user already exists + token = token_store.get_token_from_user_id(user_id) + existing_data = await token_store.get_user_data(token) + + # 3. Construct Settings + default_settings = get_default_settings() + + user_settings = UserSettings( + language=payload.language or default_settings.language, + catalogs=payload.catalogs if payload.catalogs else default_settings.catalogs, + rpdb_key=rpdb_key, + excluded_movie_genres=payload.excluded_movie_genres, + excluded_series_genres=payload.excluded_series_genres, + ) + + is_new_account = not existing_data + + # 4. Verify Stremio connection + verified_auth_key = await _verify_credentials_or_raise({"authKey": stremio_auth_key}) + + # 5. 
Prepare payload to store payload_to_store = { - "username": username, - "password": password, - "authKey": auth_key, - "includeWatched": payload.includeWatched, + "authKey": verified_auth_key, + "email": email, + "settings": user_settings.model_dump(), } - verified_auth_key = await _verify_credentials_or_raise(payload_to_store) - + # 6. Store user data try: - token, created = await token_store.store_payload(payload_to_store) - logger.info(f"[{redact_token(token)}] Token {'created' if created else 'updated'}") + token = await token_store.store_user_data(user_id, payload_to_store) + logger.info(f"[{redact_token(token)}] Account {'created' if is_new_account else 'updated'} for user {user_id}") except RuntimeError as exc: - logger.error("Token storage failed: {}", exc) - raise HTTPException( - status_code=500, - detail="Server configuration error: TOKEN_SALT must be set to a secure value.", - ) from exc + raise HTTPException(status_code=500, detail="Server configuration error.") from exc except (redis_exceptions.RedisError, OSError) as exc: - logger.error("Token storage unavailable: {}", exc) - raise HTTPException( - status_code=503, - detail="Token storage is temporarily unavailable. Please try again once Redis is reachable.", - ) from exc + raise HTTPException(status_code=503, detail="Storage temporarily unavailable.") from exc - if created: - try: - await refresh_catalogs_for_credentials(payload_to_store, auth_key=verified_auth_key) - except Exception as exc: # pragma: no cover - remote dependency - logger.error(f"[{redact_token(token)}] Initial catalog refresh failed: {{}}", exc, exc_info=True) - await token_store.delete_token(token=token) - raise HTTPException( - status_code=502, - detail="Credentials verified, but Watchly couldn't refresh your catalogs yet. Please try again.", - ) from exc base_url = settings.HOST_NAME manifest_url = f"{base_url}/{token}/manifest.json" - expires_in = settings.TOKEN_TTL_SECONDS if settings.TOKEN_TTL_SECONDS > 0 else None return TokenResponse( @@ -141,3 +136,75 @@ async def create_token(payload: TokenRequest, request: Request) -> TokenResponse manifestUrl=manifest_url, expiresInSeconds=expires_in, ) + + +async def get_stremio_user_data(payload: TokenRequest) -> tuple[str, str]: + auth_key = payload.authKey.strip() if payload.authKey else None + + if not auth_key: + raise HTTPException(status_code=400, detail="Auth Key required.") + + if auth_key.startswith('"') and auth_key.endswith('"'): + auth_key = auth_key[1:-1].strip() + + stremio_service = StremioService(auth_key=auth_key) + try: + user_info = await stremio_service.get_user_info() + user_id = user_info["user_id"] + email = user_info.get("email", "") + return user_id, email + except Exception as e: + logger.error(f"Stremio identity check failed: {e}") + raise HTTPException( + status_code=400, detail="Failed to verify Stremio identity. Your auth key might be invalid or expired." 
+ ) + finally: + await stremio_service.close() + + +@router.post("/stremio-identity", status_code=200) +async def check_stremio_identity(payload: TokenRequest): + """Fetch user info from Stremio and check if account exists.""" + user_id, email = await get_stremio_user_data(payload) + try: + # Check existence + token = token_store.get_token_from_user_id(user_id) + user_data = await token_store.get_user_data(token) + exists = bool(user_data) + except ValueError: + exists = False + user_data = None + + response = {"user_id": user_id, "email": email, "exists": exists} + if exists and user_data: + response["settings"] = user_data.get("settings") + + return response + + +@router.delete("/", status_code=200) +async def delete_token(payload: TokenRequest): + """Delete a token based on Stremio auth key.""" + try: + user_id, _ = await get_stremio_user_data(payload) + + # Get token from user_id + token = token_store.get_token_from_user_id(user_id) + + # Verify account exists + existing_data = await token_store.get_user_data(token) + if not existing_data: + raise HTTPException(status_code=404, detail="Account not found.") + + # Delete the token + await token_store.delete_token(token) + logger.info(f"[{redact_token(token)}] Token deleted for user {user_id}") + return {"detail": "Settings deleted successfully"} + except HTTPException: + raise + except (redis_exceptions.RedisError, OSError) as exc: + logger.error("Token deletion failed: {}", exc) + raise HTTPException( + status_code=503, + detail="Token deletion is temporarily unavailable. Please try again once Redis is reachable.", + ) from exc diff --git a/app/api/main.py b/app/api/main.py index 02cf7f0..c0fd953 100644 --- a/app/api/main.py +++ b/app/api/main.py @@ -1,8 +1,10 @@ from fastapi import APIRouter +from .endpoints.announcement import router as announcement_router from .endpoints.catalogs import router as catalogs_router from .endpoints.health import router as health_router from .endpoints.manifest import router as manifest_router +from .endpoints.meta import router as meta_router from .endpoints.tokens import router as tokens_router api_router = APIRouter() @@ -17,3 +19,5 @@ async def root(): api_router.include_router(catalogs_router) api_router.include_router(tokens_router) api_router.include_router(health_router) +api_router.include_router(meta_router) +api_router.include_router(announcement_router) diff --git a/app/core/app.py b/app/core/app.py index 9652422..bdff5f7 100644 --- a/app/core/app.py +++ b/app/core/app.py @@ -1,3 +1,4 @@ +import asyncio import os from contextlib import asynccontextmanager from pathlib import Path @@ -10,8 +11,10 @@ from app.api.main import api_router from app.services.catalog_updater import BackgroundCatalogUpdater +from app.startup.migration import migrate_tokens from .config import settings +from .version import __version__ # class InterceptHandler(logging.Handler): # def emit(self, record): @@ -35,6 +38,16 @@ async def lifespan(app: FastAPI): Manage application lifespan events (startup/shutdown). 
""" global catalog_updater + task = asyncio.create_task(migrate_tokens()) + + # Ensure background exceptions are surfaced in logs + def _on_done(t: asyncio.Task): + try: + t.result() + except Exception as exc: + logger.error(f"migrate_tokens background task failed: {exc}") + + task.add_done_callback(_on_done) # Startup if settings.AUTO_UPDATE_CATALOGS: @@ -57,7 +70,7 @@ async def lifespan(app: FastAPI): app = FastAPI( title="Watchly", description="Stremio catalog addon for movie and series recommendations", - version=settings.APP_VERSION, + version=__version__, lifespan=lifespan, ) @@ -73,10 +86,10 @@ async def lifespan(app: FastAPI): # Static directory is at project root (3 levels up from app/core/app.py) # app/core/app.py -> app/core -> app -> root project_root = Path(__file__).resolve().parent.parent.parent -static_dir = project_root / "static" +static_dir = project_root / "app/static" if static_dir.exists(): - app.mount("/static", StaticFiles(directory=str(static_dir)), name="static") + app.mount("/app/static", StaticFiles(directory=str(static_dir)), name="static") # Serve index.html at /configure and /{token}/configure @@ -94,10 +107,12 @@ async def configure_page(token: str | None = None): announcement_html = (dynamic_announcement or "").strip() snippet = "" if announcement_html: - snippet = '\n
' f"{announcement_html}" "
" + snippet = f'\n
{announcement_html}
' html_content = html_content.replace("", snippet, 1) # Inject version - html_content = html_content.replace("", settings.APP_VERSION, 1) + html_content = html_content.replace("", __version__, 1) + # Inject host + html_content = html_content.replace("", settings.HOST_NAME, 1) return HTMLResponse(content=html_content, media_type="text/html") return HTMLResponse( content="Watchly API is running. Static files not found.", diff --git a/app/core/config.py b/app/core/config.py index 22d63b7..7996a39 100644 --- a/app/core/config.py +++ b/app/core/config.py @@ -2,6 +2,8 @@ from pydantic_settings import BaseSettings, SettingsConfigDict +from app.core.version import __version__ + class Settings(BaseSettings): """Application settings loaded from environment variables.""" @@ -17,8 +19,8 @@ class Settings(BaseSettings): PORT: int = 8000 ADDON_ID: str = "com.bimal.watchly" ADDON_NAME: str = "Watchly" - APP_VERSION: str = "0.1.5" REDIS_URL: str = "redis://redis:6379/0" + REDIS_TOKEN_KEY: str = "watchly:token:" TOKEN_SALT: str = "change-me" TOKEN_TTL_SECONDS: int = 0 # 0 = never expire ANNOUNCEMENT_HTML: str = "" @@ -29,10 +31,17 @@ class Settings(BaseSettings): {"hour": 0, "minute": 0, "id": "catalog_refresh_midnight"}, ) CATALOG_REFRESH_INTERVAL_SECONDS: int = 6 * 60 * 60 # 6 hours (used when CATALOG_UPDATE_MODE="interval") - APP_ENV: Literal["development", "production"] = "development" + APP_ENV: Literal["development", "production", "vercel"] = "development" HOST_NAME: str = "https://1ccea4301587-watchly.baby-beamup.club" RECOMMENDATION_SOURCE_ITEMS_LIMIT: int = 10 + # AI + DEFAULT_GEMINI_MODEL: str = "gemma-3-27b-it" + GEMINI_API_KEY: str | None = None + settings = Settings() + +# Get version from version.py (single source of truth) +APP_VERSION = __version__ diff --git a/app/core/constants.py b/app/core/constants.py new file mode 100644 index 0000000..9714637 --- /dev/null +++ b/app/core/constants.py @@ -0,0 +1 @@ +RECOMMENDATIONS_CATALOG_NAME: str = "Top Picks For You" diff --git a/app/core/security.py b/app/core/security.py new file mode 100644 index 0000000..276fae0 --- /dev/null +++ b/app/core/security.py @@ -0,0 +1,10 @@ +def redact_token(token: str | None) -> str: + """ + Redact a token for logging purposes. + Shows the first 6 characters followed by ***. 
+ """ + if not token: + return "None" + if len(token) <= 6: + return token + return f"{token[:6]}***" diff --git a/app/core/settings.py b/app/core/settings.py new file mode 100644 index 0000000..6a8807e --- /dev/null +++ b/app/core/settings.py @@ -0,0 +1,33 @@ +from pydantic import BaseModel + + +class CatalogConfig(BaseModel): + id: str # "watchly.rec", "watchly.theme", "watchly.item" + name: str | None = None + enabled: bool = True + + +class UserSettings(BaseModel): + catalogs: list[CatalogConfig] + language: str = "en-US" + rpdb_key: str | None = None + excluded_movie_genres: list[str] = [] + excluded_series_genres: list[str] = [] + + +def get_default_settings() -> UserSettings: + return UserSettings( + language="en-US", + catalogs=[ + CatalogConfig(id="watchly.rec", name="Top Picks for You", enabled=True), + CatalogConfig(id="watchly.loved", name="More Like", enabled=True), + CatalogConfig(id="watchly.watched", name="Because you watched", enabled=True), + CatalogConfig(id="watchly.theme", name="Genre & Keyword Catalogs", enabled=True), + ], + ) + + +class Credentials(BaseModel): + authKey: str + email: str + settings: UserSettings diff --git a/app/core/version.py b/app/core/version.py new file mode 100644 index 0000000..5becc17 --- /dev/null +++ b/app/core/version.py @@ -0,0 +1 @@ +__version__ = "1.0.0" diff --git a/app/models/profile.py b/app/models/profile.py new file mode 100644 index 0000000..a96f6cb --- /dev/null +++ b/app/models/profile.py @@ -0,0 +1,93 @@ +from pydantic import BaseModel, Field + + +class SparseVector(BaseModel): + """ + Represents a sparse vector where keys are feature IDs and values are weights. + For countries, keys can be string codes (hashed or mapped to int if strictly int keys needed, + but let's check if we can use str keys or if we stick to int. + Original SparseVector uses `dict[int, float]`. + TMDB country codes are strings (e.g. "US"). + We can either map them to ints or change the model to support str keys. + Let's update the model to support string keys for versatility, or keep int and hash strings. + However, for Pydantic and JSON, string keys are native. + Let's change keys to string/int union or just strings (since ints are valid dict keys too). + Actually, since `genres` IDs are ints, let's allow both or specific types. + For simplicity, let's stick to `dict[str, float]` since JSON keys are strings anyway. + But wait, existing code uses ints for IDs. + Let's make a separate StringSparseVector or just genericize it. + """ + + values: dict[int, float] = Field(default_factory=dict) + + def normalize(self): + """Normalize values to 0-1 range based on the maximum value.""" + if not self.values: + return + + max_val = max(self.values.values()) + if max_val > 0: + for k in self.values: + self.values[k] = round(self.values[k] / max_val, 4) + + def get_top_features(self, limit: int = 5) -> list[tuple[int, float]]: + """Return top N features by weight.""" + sorted_items = sorted(self.values.items(), key=lambda x: x[1], reverse=True) + return sorted_items[:limit] + + +class StringSparseVector(BaseModel): + """ + Sparse vector for string-based features (like Country Codes). 
+ """ + + values: dict[str, float] = Field(default_factory=dict) + + def normalize(self): + if not self.values: + return + max_val = max(self.values.values()) + if max_val > 0: + for k in self.values: + self.values[k] = round(self.values[k] / max_val, 4) + + def get_top_features(self, limit: int = 5) -> list[tuple[str, float]]: + sorted_items = sorted(self.values.items(), key=lambda x: x[1], reverse=True) + return sorted_items[:limit] + + +class UserTasteProfile(BaseModel): + """ + The complete user taste profile consisting of multiple sparse vectors. + """ + + genres: SparseVector = Field(default_factory=SparseVector) + keywords: SparseVector = Field(default_factory=SparseVector) + cast: SparseVector = Field(default_factory=SparseVector) + crew: SparseVector = Field(default_factory=SparseVector) + years: SparseVector = Field(default_factory=SparseVector) + countries: StringSparseVector = Field(default_factory=StringSparseVector) + + def normalize_all(self): + """Normalize all component vectors.""" + self.genres.normalize() + self.keywords.normalize() + self.cast.normalize() + self.crew.normalize() + self.years.normalize() + self.countries.normalize() + + def get_top_genres(self, limit: int = 3) -> list[tuple[int, float]]: + return self.genres.get_top_features(limit) + + def get_top_keywords(self, limit: int = 5) -> list[tuple[int, float]]: + return self.keywords.get_top_features(limit) + + def get_top_crew(self, limit: int = 2) -> list[tuple[int, float]]: + return self.crew.get_top_features(limit) + + def get_top_countries(self, limit: int = 2) -> list[tuple[str, float]]: + return self.countries.get_top_features(limit) + + def get_top_year(self, limit: int = 1) -> list[tuple[int, float]]: + return self.years.get_top_features(limit) diff --git a/app/models/scoring.py b/app/models/scoring.py new file mode 100644 index 0000000..6cc007e --- /dev/null +++ b/app/models/scoring.py @@ -0,0 +1,65 @@ +from datetime import datetime + +from pydantic import BaseModel, Field, field_validator + + +class StremioState(BaseModel): + """Represents the user state for a library item.""" + + lastWatched: datetime | None = None + timeWatched: int = 0 + timeOffset: int = 0 + overallTimeWatched: int = 0 + timesWatched: int = 0 + flaggedWatched: int = 0 + duration: int = 0 + video_id: str | None = None + watched: str | None = None + noNotif: bool = False + season: int = 0 + episode: int = 0 + + @field_validator("lastWatched", mode="before") + @classmethod + def parse_last_watched(cls, v): + if isinstance(v, str): + try: + return datetime.fromisoformat(v.replace("Z", "+00:00")) + except ValueError: + return None + return v + + +class StremioLibraryItem(BaseModel): + """Represents a raw item from Stremio library.""" + + id: str = Field(..., alias="_id") + type: str + name: str + state: StremioState = Field(default_factory=StremioState) + mtime: str = Field(default="", alias="_mtime") + poster: str | None = None + temp: bool + removed: bool + + # Enriched fields (not in raw Stremio JSON, added by our service) + is_loved: bool = Field(default=False, alias="_is_loved") + is_liked: bool = Field(default=False, alias="_is_liked") + interest_score: float = Field(default=0.0, alias="_interest_score") + + class Config: + populate_by_name = True + + +class ScoredItem(BaseModel): + """ + A processed item with calculated scores. + This is the output of the ScoringService. 
+ """ + + item: StremioLibraryItem + score: float + completion_rate: float + is_rewatched: bool + is_recent: bool + source_type: str # 'loved' | 'watched' | 'liked' diff --git a/app/models/token.py b/app/models/token.py new file mode 100644 index 0000000..80f6b3a --- /dev/null +++ b/app/models/token.py @@ -0,0 +1,11 @@ +from pydantic import BaseModel + + +class UserSettings(BaseModel): + pass + + +class Credentials(BaseModel): + authKey: str + email: str + user_settings: UserSettings diff --git a/app/services/catalog.py b/app/services/catalog.py index ccaf387..17dbc8e 100644 --- a/app/services/catalog.py +++ b/app/services/catalog.py @@ -1,145 +1,193 @@ -import asyncio -from collections import Counter - -from loguru import logger +from datetime import datetime, timezone +from app.core.settings import CatalogConfig, UserSettings +from app.services.row_generator import RowGeneratorService +from app.services.scoring import ScoringService from app.services.stremio_service import StremioService from app.services.tmdb_service import TMDBService - -from .tmdb.genre import MOVIE_GENRE_TO_ID_MAP, SERIES_GENRE_TO_ID_MAP +from app.services.user_profile import UserProfileService class DynamicCatalogService: + """ + Generates dynamic catalog rows based on user library and preferences. + """ def __init__(self, stremio_service: StremioService): self.stremio_service = stremio_service self.tmdb_service = TMDBService() + self.scoring_service = ScoringService() + self.user_profile_service = UserProfileService() + self.row_generator = RowGeneratorService(tmdb_service=self.tmdb_service) @staticmethod def normalize_type(type_): return "series" if type_ == "tv" else type_ - def build_catalog_entry(self, item, label): + def build_catalog_entry(self, item, label, config_id): + item_id = item.get("_id", "") + # Use watchly.{config_id}.{item_id} format for better organization + if config_id in ["watchly.item", "watchly.loved", "watchly.watched"]: + # New Item-based catalog format + catalog_id = f"{config_id}.{item_id}" + elif item_id.startswith("tt") and config_id in ["watchly.loved", "watchly.watched"]: + catalog_id = f"{config_id}.{item_id}" + else: + catalog_id = item_id + + name = item.get("name") + return { "type": self.normalize_type(item.get("type")), - "id": item.get("_id"), - "name": f"Because you {label} {item.get('name')}", + "id": catalog_id, + "name": f"{label} {name}", "extra": [], } - def process_items(self, items, seen_items, seed, label): - entries = [] - for item in items: - type_ = self.normalize_type(item.get("type")) - if item.get("_id") in seen_items or seed[type_]: - continue - seen_items.add(item.get("_id")) - seed[type_] = True - entries.append(self.build_catalog_entry(item, label)) - return entries - - async def get_watched_loved_catalogs(self, library_items: list[dict]): - seen_items = set() + async def get_theme_based_catalogs( + self, library_items: list[dict], user_settings: UserSettings | None = None + ) -> list[dict]: catalogs = [] - seed = { - "watched": { - "movie": False, - "series": False, - }, - "loved": { - "movie": False, - "series": False, - }, - } + # 1. 
Build User Profile + # Combine loved and watched + all_items = library_items.get("loved", []) + library_items.get("watched", []) - loved_items = library_items.get("loved", []) - watched_items = library_items.get("watched", []) + # Deduplicate + unique_items = {item["_id"]: item for item in all_items} - catalogs += self.process_items(loved_items, seen_items, seed["loved"], "Loved") - catalogs += self.process_items(watched_items, seen_items, seed["watched"], "Watched") + # Score items + scored_objects = [] - return catalogs + # Use only recent history for freshness + sorted_history = sorted(unique_items.values(), key=lambda x: x.get("_mtime", ""), reverse=True) + recent_history = sorted_history[:30] + + for item_data in recent_history: + scored_obj = self.scoring_service.process_item(item_data) + scored_objects.append(scored_obj) - async def _get_item_genres(self, item_id: str, item_type: str) -> list[str]: - """Fetch genres for a specific item from TMDB.""" - try: - # Convert IMDB ID to TMDB ID - tmdb_id = None - media_type = "movie" if item_type == "movie" else "tv" - - if item_id.startswith("tt"): - tmdb_id, _ = await self.tmdb_service.find_by_imdb_id(item_id) - elif item_id.startswith("tmdb:"): - tmdb_id = int(item_id.split(":")[1]) - - if not tmdb_id: - return [] - - # Fetch details - if media_type == "movie": - details = await self.tmdb_service.get_movie_details(tmdb_id) - else: - details = await self.tmdb_service.get_tv_details(tmdb_id) - - return [g.get("name") for g in details.get("genres", [])] - except Exception as e: - logger.warning(f"Failed to fetch genres for {item_id}: {e}") - return [] - - async def get_genre_based_catalogs(self, library_items: list[dict]): - # get separate movies and series lists from loved items - loved_movies = [item for item in library_items.get("loved", []) if item.get("type") == "movie"] - loved_series = [item for item in library_items.get("loved", []) if item.get("type") == "series"] - - # only take last 5 results from loved movies and series - loved_movies = loved_movies[:5] - loved_series = loved_series[:5] - - # fetch genres concurrently - movie_tasks = [self._get_item_genres(item.get("_id").strip(), "movie") for item in loved_movies] - series_tasks = [self._get_item_genres(item.get("_id").strip(), "series") for item in loved_series] - - movie_genres_list = await asyncio.gather(*movie_tasks) - series_genres_list = await asyncio.gather(*series_tasks) - - # now flatten list and count the occurance of each genre for both movies and series separately - movie_genre_counts = Counter( - [genre for sublist in movie_genres_list for genre in sublist if genre in MOVIE_GENRE_TO_ID_MAP] + # Get excluded genres + excluded_movie_genres = [] + excluded_series_genres = [] + if user_settings: + excluded_movie_genres = [int(g) for g in user_settings.excluded_movie_genres] + excluded_series_genres = [int(g) for g in user_settings.excluded_series_genres] + + # 2. 
Generate Thematic Rows with Type-Specific Profiles + # Generate for Movies + movie_profile = await self.user_profile_service.build_user_profile( + scored_objects, content_type="movie", excluded_genres=excluded_movie_genres ) - series_genre_counts = Counter( - [genre for sublist in series_genres_list for genre in sublist if genre in SERIES_GENRE_TO_ID_MAP] + movie_rows = await self.row_generator.generate_rows(movie_profile, "movie") + + for row in movie_rows: + # translated_title = await translation_service.translate(row.title, lang) + catalogs.append({"type": "movie", "id": row.id, "name": row.title, "extra": []}) + + # Generate for Series + series_profile = await self.user_profile_service.build_user_profile( + scored_objects, content_type="series", excluded_genres=excluded_series_genres ) - sorted_movie_genres = sorted(movie_genre_counts.items(), key=lambda x: x[1], reverse=True) - sorted_series_genres = sorted(series_genre_counts.items(), key=lambda x: x[1], reverse=True) + series_rows = await self.row_generator.generate_rows(series_profile, "series") + + for row in series_rows: + # translated_title = await translation_service.translate(row.title, lang) + catalogs.append({"type": "series", "id": row.id, "name": row.title, "extra": []}) - # now get the top 2 genres for movies and series - top_2_movie_genre_names = [genre for genre, _ in sorted_movie_genres[:2]] - top_2_series_genre_names = [genre for genre, _ in sorted_series_genres[:2]] + return catalogs - # convert id to name - top_2_movie_genres = [str(MOVIE_GENRE_TO_ID_MAP[genre_name]) for genre_name in top_2_movie_genre_names] - top_2_series_genres = [str(SERIES_GENRE_TO_ID_MAP[genre_name]) for genre_name in top_2_series_genre_names] + async def get_dynamic_catalogs( + self, library_items: list[dict], user_settings: UserSettings | None = None + ) -> list[dict]: + """ + Generate all dynamic catalog rows. 
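For orientation, a hypothetical entry as build_catalog_entry (defined earlier in this service) would emit it for a recently loved movie, assuming the default catalog names from app/core/settings.py:

{
    "type": "movie",
    "id": "watchly.loved.tt1375666",  # "{config_id}.{item_id}"
    "name": "More Like Inception",    # "{label} {name}"
    "extra": [],
}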
+ """ catalogs = [] + lang = user_settings.language if user_settings else "en-US" + + # Theme Based + theme_config = next((c for c in user_settings.catalogs if c.id == "watchly.theme"), None) - if top_2_movie_genres: - catalogs.append( - { - "type": "movie", - "id": f"watchly.genre.{'_'.join(top_2_movie_genres)}", - "name": "You might also Like", - "extra": [], - } - ) - - if top_2_series_genres: - catalogs.append( - { - "type": "series", - "id": f"watchly.genre.{'_'.join(top_2_series_genres)}", - "name": "You might also Like", - "extra": [], - } - ) + if theme_config and theme_config.enabled: + catalogs.extend(await self.get_theme_based_catalogs(library_items, user_settings)) + + # Item Based (Loved/Watched) + loved_config = next((c for c in user_settings.catalogs if c.id == "watchly.loved"), None) + watched_config = next((c for c in user_settings.catalogs if c.id == "watchly.watched"), None) + + # Fallback for old settings (watchly.item) + if not loved_config and not watched_config: + old_config = next((c for c in user_settings.catalogs if c.id == "watchly.item"), None) + if old_config and old_config.enabled: + # Create temporary configs + loved_config = CatalogConfig(id="watchly.loved", name=None, enabled=True) + watched_config = CatalogConfig(id="watchly.watched", name=None, enabled=True) + + # Movies + await self._add_item_based_rows(catalogs, library_items, "movie", lang, loved_config, watched_config) + # Series + await self._add_item_based_rows(catalogs, library_items, "series", lang, loved_config, watched_config) return catalogs + + async def _add_item_based_rows( + self, + catalogs: list, + library_items: dict, + content_type: str, + language: str, + loved_config, + watched_config, + ): + """Helper to add 'Because you watched' and 'More like' rows.""" + + # Helper to parse date + def get_date(item): + + val = item.get("state", {}).get("lastWatched") + if val: + try: + if isinstance(val, str): + return datetime.fromisoformat(val.replace("Z", "+00:00")) + return val + except (ValueError, TypeError): + pass + # Fallback to mtime + val = item.get("_mtime") + if val: + try: + return datetime.fromisoformat(str(val).replace("Z", "+00:00")) + except (ValueError, TypeError): + pass + return datetime.min.replace(tzinfo=timezone.utc) + + # 1. More Like + last_loved = None # Initialize for the watched check + if loved_config and loved_config.enabled: + loved = [i for i in library_items.get("loved", []) if i.get("type") == content_type] + loved.sort(key=get_date, reverse=True) + + last_loved = loved[0] if loved else None + if last_loved: + label = loved_config.name + + catalogs.append(self.build_catalog_entry(last_loved, label, "watchly.loved")) + + # 2. 
Because you watched + if watched_config and watched_config.enabled: + watched = [i for i in library_items.get("watched", []) if i.get("type") == content_type] + watched.sort(key=get_date, reverse=True) + + last_watched = None + for item in watched: + # Avoid duplicate row if it's the same item as 'More like' + if last_loved and item.get("_id") == last_loved.get("_id"): + continue + last_watched = item + break + + if last_watched: + label = watched_config.name + + catalogs.append(self.build_catalog_entry(last_watched, label, "watchly.watched")) diff --git a/app/services/catalog_updater.py b/app/services/catalog_updater.py index 15e0972..53a5db3 100644 --- a/app/services/catalog_updater.py +++ b/app/services/catalog_updater.py @@ -4,48 +4,65 @@ from apscheduler.schedulers.asyncio import AsyncIOScheduler from apscheduler.triggers.cron import CronTrigger from apscheduler.triggers.interval import IntervalTrigger +from fastapi import HTTPException from loguru import logger from app.core.config import settings +from app.core.security import redact_token +from app.core.settings import UserSettings, get_default_settings from app.services.catalog import DynamicCatalogService from app.services.stremio_service import StremioService from app.services.token_store import token_store -from app.utils import redact_token +from app.services.translation import translation_service # Max number of concurrent updates to prevent overwhelming external APIs MAX_CONCURRENT_UPDATES = 5 -async def refresh_catalogs_for_credentials( - credentials: dict[str, Any], auth_key: str | None = None, key: str | None = None -) -> bool: - """Regenerate catalogs for the provided credentials and push them to Stremio.""" - stremio_service = StremioService( - username=credentials.get("username") or "", - password=credentials.get("password") or "", - auth_key=auth_key or credentials.get("authKey"), - ) +async def refresh_catalogs_for_credentials(token: str, credentials: dict[str, Any]) -> bool: + if not credentials: + logger.warning(f"[{redact_token(token)}] Attempted to refresh catalogs with no credentials.") + raise HTTPException(status_code=401, detail="Invalid or expired token. Please reconfigure the addon.") + + auth_key = credentials.get("authKey") + stremio_service = StremioService(auth_key=auth_key) # check if user has addon installed or not try: - addon_installed = await stremio_service.is_addon_installed() + addon_installed = await stremio_service.is_addon_installed(auth_key) if not addon_installed: - logger.info("User has not installed addon. Removing token from redis") - await token_store.delete_token(key=key) + logger.info(f"[{redact_token(token)}] User has not installed addon. 
Removing token from redis") + # Ensure we delete by token, not by raw Redis key + await token_store.delete_token(token=token) return True except Exception as e: - logger.exception(f"Failed to check if addon is installed: {e}") + logger.exception(f"[{redact_token(token)}] Failed to check if addon is installed: {e}") try: library_items = await stremio_service.get_library_items() dynamic_catalog_service = DynamicCatalogService(stremio_service=stremio_service) - catalogs = await dynamic_catalog_service.get_watched_loved_catalogs(library_items=library_items) - catalogs += await dynamic_catalog_service.get_genre_based_catalogs(library_items=library_items) - auth_key_or_username = credentials.get("authKey") or credentials.get("username") - redacted = redact_token(auth_key_or_username) if auth_key_or_username else "unknown" - logger.info(f"[{redacted}] Prepared {len(catalogs)} catalogs") - auth_key = await stremio_service.get_auth_key() + # Ensure user_settings is available + user_settings = get_default_settings() + if credentials.get("settings"): + try: + user_settings = UserSettings(**credentials["settings"]) + except Exception as e: + user_settings = get_default_settings() + logger.warning(f"[{redact_token(token)}] Failed to parse user settings from credentials: {e}") + + catalogs = await dynamic_catalog_service.get_dynamic_catalogs( + library_items=library_items, user_settings=user_settings + ) + + if user_settings and user_settings.language: + for cat in catalogs: + if name := cat.get("name"): + cat["name"] = await translation_service.translate(name, user_settings.language) + logger.info(f"[{redact_token(token)}] Prepared {len(catalogs)} catalogs") return await stremio_service.update_catalogs(catalogs, auth_key) + except Exception as e: + logger.exception(f"[{redact_token(token)}] Failed to update catalogs: {e}", exc_info=True) + raise e finally: await stremio_service.close() @@ -108,24 +125,27 @@ async def refresh_all_tokens(self) -> None: sem = asyncio.Semaphore(MAX_CONCURRENT_UPDATES) async def _update_safe(key: str, payload: dict[str, Any]) -> None: - if not self._has_credentials(payload): + if not payload.get("authKey"): logger.debug( - f"Skipping token {self._mask_key(key)} with incomplete credentials", + f"Skipping token {redact_token(key)} with incomplete credentials", ) return async with sem: try: - updated = await refresh_catalogs_for_credentials(payload, key=key) + updated = await refresh_catalogs_for_credentials(key, payload) logger.info( - f"Background refresh for {self._mask_key(key)} completed (updated={updated})", + f"Background refresh for {redact_token(key)} completed (updated={updated})", ) except Exception as exc: - logger.error(f"Background refresh failed for {self._mask_key(key)}: {exc}", exc_info=True) + logger.error(f"Background refresh failed for {redact_token(key)}: {exc}", exc_info=True) try: async for key, payload in token_store.iter_payloads(): - tasks.append(asyncio.create_task(_update_safe(key, payload))) + # Extract token from redis key prefix + prefix = token_store.KEY_PREFIX + tok = key[len(prefix) :] if key.startswith(prefix) else key # noqa + tasks.append(asyncio.create_task(_update_safe(tok, payload))) if tasks: logger.info(f"Starting background refresh for {len(tasks)} tokens...") @@ -136,12 +156,3 @@ async def _update_safe(key: str, payload: dict[str, Any]) -> None: except Exception as exc: logger.error(f"Catalog refresh scan failed: {exc}", exc_info=True) - - @staticmethod - def _has_credentials(payload: dict[str, Any]) -> bool: - return 
bool(payload.get("authKey") or (payload.get("username") and payload.get("password"))) - - @staticmethod - def _mask_key(key: str) -> str: - suffix = key.split(":")[-1] - return f"***{suffix[-6:]}" diff --git a/app/services/discovery.py b/app/services/discovery.py new file mode 100644 index 0000000..b52d485 --- /dev/null +++ b/app/services/discovery.py @@ -0,0 +1,190 @@ +import asyncio + +from app.models.profile import UserTasteProfile +from app.services.tmdb_service import TMDBService + + +class DiscoveryEngine: + """ + Service to discover content based on User Taste Profile. + Uses TMDB Discovery API with weighted query parameters derived from the user profile. + """ + + def __init__(self): + self.tmdb_service = TMDBService() + # Limit concurrent discovery calls to avoid rate limiting + self._sem = asyncio.Semaphore(10) + + async def discover_recommendations( + self, + profile: UserTasteProfile, + content_type: str, + limit: int = 20, + excluded_genres: list[int] | None = None, + ) -> list[dict]: + """ + Find content that matches the user's taste profile. + Strategy: + 1. Extract top weighted Genres, Keywords, Actors, Director. + 2. Build specific 'Discovery Queries' for each category. + 3. Fetch results in parallel. + 4. Return the combined candidate set (B). + """ + # 1. Extract Top Features + top_genres = profile.get_top_genres(limit=3) # e.g. [(28, 1.0), (878, 0.8)] + top_keywords = profile.get_top_keywords(limit=3) # e.g. [(123, 0.9)] + # Need to add get_top_cast to UserTasteProfile model first, assuming it exists or using profile.cast directly + # Based on previous step, profile.cast exists. + top_cast = profile.cast.get_top_features(limit=2) + top_crew = profile.get_top_crew(limit=1) # e.g. [(555, 1.0)] - Director + + top_countries = profile.get_top_countries(limit=2) + top_year = profile.get_top_year(limit=1) + + if not top_genres and not top_keywords and not top_cast: + # Fallback if profile is empty + return [] + + tasks = [] + base_params = {} + if excluded_genres: + base_params["without_genres"] = "|".join([str(g) for g in excluded_genres]) + + # Query 1: Top Genres Mix + if top_genres: + genre_ids = "|".join([str(g[0]) for g in top_genres]) + params_popular = { + "with_genres": genre_ids, + "sort_by": "popularity.desc", + "vote_count.gte": 500, + **base_params, + } + tasks.append(self._fetch_discovery(content_type, params_popular)) + + # fetch atleast two pages of results + for i in range(2): + params_rating = { + "with_genres": genre_ids, + "sort_by": "vote_average.desc", + "vote_count.gte": 500, + "page": i + 1, + **base_params, + } + tasks.append(self._fetch_discovery(content_type, params_rating)) + + # Query 2: Top Keywords + if top_keywords: + keyword_ids = "|".join([str(k[0]) for k in top_keywords]) + params_keywords = { + "with_keywords": keyword_ids, + "sort_by": "popularity.desc", + "vote_count.gte": 500, + **base_params, + } + tasks.append(self._fetch_discovery(content_type, params_keywords)) + + # fetch atleast two pages of results + for i in range(3): + params_rating = { + "with_keywords": keyword_ids, + "sort_by": "vote_average.desc", + "vote_count.gte": 500, + "page": i + 1, + **base_params, + } + tasks.append(self._fetch_discovery(content_type, params_rating)) + + # Query 3: Top Actors + for actor in top_cast: + actor_id = actor[0] + params_actor = { + "with_cast": str(actor_id), + "sort_by": "popularity.desc", + "vote_count.gte": 500, + **base_params, + } + tasks.append(self._fetch_discovery(content_type, params_actor)) + + params_rating = { + "with_cast": 
str(actor_id), + "sort_by": "vote_average.desc", + "vote_count.gte": 500, + **base_params, + } + tasks.append(self._fetch_discovery(content_type, params_rating)) + + # Query 4: Top Director + if top_crew: + director_id = top_crew[0][0] + params_director = { + "with_crew": str(director_id), + "sort_by": "vote_average.desc", # Directors imply quality preference + "vote_count.gte": 500, + **base_params, + } + tasks.append(self._fetch_discovery(content_type, params_director)) + + # pair the rating-sorted query with a popularity-sorted one, as for the other features + params_popular = { + "with_crew": str(director_id), + "sort_by": "popularity.desc", + "vote_count.gte": 500, + **base_params, + } + tasks.append(self._fetch_discovery(content_type, params_popular)) + + # Query 5: Top Countries + if top_countries: + country_ids = "|".join([str(c[0]) for c in top_countries]) + params_country = { + "with_origin_country": country_ids, + "sort_by": "popularity.desc", + "vote_count.gte": 100, + **base_params, + } + tasks.append(self._fetch_discovery(content_type, params_country)) + + params_rating = { + "with_origin_country": country_ids, + "sort_by": "vote_average.desc", + "vote_count.gte": 300, + **base_params, + } + tasks.append(self._fetch_discovery(content_type, params_rating)) + + # Query 6: Top Year + if top_year: + year = top_year[0][0] + # years are stored in 10-year (decade) buckets + start_year = f"{year}-01-01" + end_year = f"{int(year) + 9}-12-31" + params_rating = { + "primary_release_date.gte": start_year, + "primary_release_date.lte": end_year, + "sort_by": "vote_average.desc", + "vote_count.gte": 500, + **base_params, + } + tasks.append(self._fetch_discovery(content_type, params_rating)) + + # 3. Execute Parallel Queries + results_batches = await asyncio.gather(*tasks, return_exceptions=True) + + # 4. Aggregate and Deduplicate + all_candidates = {} + for batch in results_batches: + if isinstance(batch, Exception) or not batch: + continue + for item in batch: + if item["id"] not in all_candidates: + all_candidates[item["id"]] = item + + return list(all_candidates.values()) + + async def _fetch_discovery(self, media_type: str, params: dict) -> list[dict]: + """Helper to call TMDB discovery.""" + try: + async with self._sem: + data = await self.tmdb_service.get_discover(media_type, **params) + return data.get("results", []) + except Exception: + return [] diff --git a/app/services/gemini.py b/app/services/gemini.py new file mode 100644 index 0000000..8a96f7d --- /dev/null +++ b/app/services/gemini.py @@ -0,0 +1,63 @@ +import asyncio + +from google import genai +from loguru import logger + +from app.core.config import settings + + +class GeminiService: + def __init__(self, model: str = settings.DEFAULT_GEMINI_MODEL): + self.model = model + self.client = None + if api_key := settings.GEMINI_API_KEY: + try: + self.client = genai.Client(api_key=api_key) + except Exception as e: + logger.warning(f"Failed to initialize Gemini client: {e}") + else: + logger.warning("GEMINI_API_KEY not set. Gemini features will be disabled.") + + @staticmethod + def get_prompt(): + return """ + You are a content catalog naming expert. + Given filters like genre, keywords, countries, or years, generate natural, + engaging catalog row titles that streaming platforms would use. 
+ + Examples: + - Genre: Action, Country: South Korea → "Korean Action Thrillers" + - Keyword: "space", Genre: Sci-Fi → "Space Exploration Adventures" + - Genre: Drama, Country: France → "Acclaimed French Cinema" + - Country: "USA" + Genre: "Sci-Fi and Fantasy" → "Hollywood Sci-Fi and Fantasy" + - Keywords: "revenge" + "martial arts" → "Revenge & Martial Arts" + + Keep titles: + - Short (2-5 words) + - Natural and engaging + - Focused on what makes the content appealing + - Only return a single best title and nothing else. + """ + + def generate_content(self, prompt: str) -> str: + system_prompt = self.get_prompt() + if not self.client: + logger.warning("Gemini client not initialized. Gemini features will be disabled.") + return "" + try: + response = self.client.models.generate_content( + model=self.model, + contents=system_prompt + "\n\n" + prompt, + ) + return response.text.strip() + except Exception as e: + logger.error(f"Error generating content: {e}") + return "" + + async def generate_content_async(self, prompt: str) -> str: + """Async wrapper to avoid blocking the event loop during network calls.""" + loop = asyncio.get_running_loop() + return await loop.run_in_executor(None, lambda: self.generate_content(prompt)) + + +gemini_service = GeminiService() diff --git a/app/services/recommendation_service.py b/app/services/recommendation_service.py index 32f1530..a11170b 100644 --- a/app/services/recommendation_service.py +++ b/app/services/recommendation_service.py @@ -1,10 +1,25 @@ import asyncio +import random from urllib.parse import unquote from loguru import logger +from app.core.settings import UserSettings +from app.services.discovery import DiscoveryEngine +from app.services.rpdb import RPDBService +from app.services.scoring import ScoringService from app.services.stremio_service import StremioService from app.services.tmdb_service import TMDBService +from app.services.user_profile import UserProfileService + + +def normalize(value, min_v=0, max_v=10): + """ + Normalize popularity / rating when blending. + """ + if max_v == min_v: + return 0 + return (value - min_v) / (max_v - min_v) def _parse_identifier(identifier: str) -> tuple[str | None, int | None]: @@ -36,21 +51,88 @@ def _parse_identifier(identifier: str) -> tuple[str | None, int | None]: class RecommendationService: """ Service for generating recommendations based on user's Stremio library. - - The recommendation flow: - 1. Get user's loved and watched items from Stremio library - 2. Use loved items as "source items" to find similar content from TMDB - 3. Filter out items already in the user's watched library - 4. Fetch full metadata from TMDB - 5. Return formatted recommendations + Implements a Hybrid Recommendation System (Similarity + Discovery). 
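A quick sanity check of the min-max helper above (illustrative values):

assert normalize(5, 0, 10) == 0.5       # vote_average mapped onto 0-1
assert normalize(250, 0, 1000) == 0.25  # popularity, assuming a 0-1000 working range
assert normalize(7, 7, 7) == 0          # degenerate range guarded to 0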
""" - def __init__(self, stremio_service: StremioService | None = None): + def __init__( + self, + stremio_service: StremioService | None = None, + language: str = "en-US", + user_settings: UserSettings | None = None, + ): if stremio_service is None: raise ValueError("StremioService instance is required for personalized recommendations") - self.tmdb_service = TMDBService() + self.tmdb_service = TMDBService(language=language) self.stremio_service = stremio_service + self.scoring_service = ScoringService() + self.user_profile_service = UserProfileService() + self.discovery_engine = DiscoveryEngine() self.per_item_limit = 20 + self.user_settings = user_settings + + async def _get_exclusion_sets(self, content_type: str | None = None) -> tuple[set[str], set[int]]: + """ + Fetch library items and build strict exclusion sets for watched content. + Also exclude items the user has added to library to avoid recommending duplicates. + Returns (watched_imdb_ids, watched_tmdb_ids) + """ + # Always fetch fresh library to ensure we don't recommend what was just watched + library_data = await self.stremio_service.get_library_items() + # Combine loved, watched, added, and removed (added/removed treated as exclude-only) + all_items = ( + library_data.get("loved", []) + + library_data.get("watched", []) + + library_data.get("added", []) + + library_data.get("removed", []) + ) + + imdb_ids = set() + tmdb_ids = set() + + for item in all_items: + # Optional: filter by type if provided, but safer to exclude all types to avoid cross-contamination + # if content_type and item.get("type") != content_type: continue + + item_id = item.get("_id", "") + imdb_id, tmdb_id = _parse_identifier(item_id) + + if imdb_id: + imdb_ids.add(imdb_id) + if tmdb_id: + tmdb_ids.add(tmdb_id) + + # Also handle raw IDs if parse failed but it looks like one + if item_id.startswith("tt"): + imdb_ids.add(item_id) + elif item_id.startswith("tmdb:"): + try: + tmdb_ids.add(int(item_id.split(":")[1])) + except Exception: + pass + + return imdb_ids, tmdb_ids + + async def _filter_candidates( + self, candidates: list[dict], watched_imdb_ids: set[str], watched_tmdb_ids: set[int] + ) -> list[dict]: + """ + Filter candidates against watched sets using TMDB ID first, then IMDB ID (if available). + """ + filtered = [] + for item in candidates: + tmdb_id = item.get("id") + # 1. Check TMDB ID (Fast) + if tmdb_id and tmdb_id in watched_tmdb_ids: + continue + + # 2. 
Check external IDs (if present in candidate) + external_ids = item.get("external_ids", {}) + imdb_id = external_ids.get("imdb_id") + if imdb_id and imdb_id in watched_imdb_ids: + continue + + filtered.append(item) + return filtered async def _fetch_metadata_for_items(self, items: list[dict], media_type: str) -> list[dict]: """ @@ -60,12 +142,15 @@ async def _fetch_metadata_for_items(self, items: list[dict], media_type: str) -> # Ensure media_type is correct query_media_type = "movie" if media_type == "movie" else "tv" + sem = asyncio.Semaphore(30) + async def _fetch_details(tmdb_id: int): try: - if query_media_type == "movie": - return await self.tmdb_service.get_movie_details(tmdb_id) - else: - return await self.tmdb_service.get_tv_details(tmdb_id) + async with sem: + if query_media_type == "movie": + return await self.tmdb_service.get_movie_details(tmdb_id) + else: + return await self.tmdb_service.get_tv_details(tmdb_id) except Exception as e: logger.warning(f"Failed to fetch details for TMDB ID {tmdb_id}: {e}") return None @@ -87,10 +172,13 @@ async def _fetch_details(tmdb_id: int): # Extract IMDB ID from external_ids external_ids = details.get("external_ids", {}) imdb_id = external_ids.get("imdb_id") - tmdb_id = details.get("id") + # tmdb_id = details.get("id") # Prefer IMDB ID, fallback to TMDB ID - stremio_id = imdb_id if imdb_id else f"tmdb:{tmdb_id}" + if imdb_id: + stremio_id = imdb_id + else: # skip content if imdb id is not available + continue # Construct Stremio meta object title = details.get("title") or details.get("name") @@ -104,16 +192,24 @@ async def _fetch_details(tmdb_id: int): release_date = details.get("release_date") or details.get("first_air_date") or "" year = release_date[:4] if release_date else None + if self.user_settings and self.user_settings.rpdb_key: + poster_url = RPDBService.get_poster_url(self.user_settings.rpdb_key, stremio_id) + else: + poster_url = f"https://image.tmdb.org/t/p/w500{poster_path}" if poster_path else None + meta_data = { "id": stremio_id, + "imdb_id": stremio_id, "type": "series" if media_type in ["tv", "series"] else "movie", "name": title, - "poster": f"https://image.tmdb.org/t/p/w500{poster_path}" if poster_path else None, + "poster": poster_url, "background": f"https://image.tmdb.org/t/p/original{backdrop_path}" if backdrop_path else None, "description": details.get("overview"), "releaseInfo": year, "imdbRating": str(details.get("vote_average", "")), "genres": [g.get("name") for g in details.get("genres", [])], + # pass internal external_ids for post-filtering if needed + "_external_ids": external_ids, } # Add runtime if available (Movie) or episode run time (TV) @@ -131,11 +227,18 @@ async def _fetch_details(tmdb_id: int): async def get_recommendations_for_item(self, item_id: str) -> list[dict]: """ Get recommendations for a specific item by IMDB ID. - - This is used when user clicks on a specific item to see "similar" recommendations. - No library filtering is applied - we show all recommendations. + STRICT FILTERING: Excludes watched items. 
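A sketch of the two-tier exclusion check with hypothetical ids (none of these are real mappings):

watched_imdb = {"tt0000001"}
watched_tmdb = {111}
candidates = [
    {"id": 111},                                            # dropped: TMDB id match
    {"id": 222, "external_ids": {"imdb_id": "tt0000001"}},  # dropped: IMDB id match
    {"id": 333},                                            # kept
]
# await service._filter_candidates(candidates, watched_imdb, watched_tmdb)
# -> [{"id": 333}]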
""" - # Convert IMDB ID to TMDB ID (needed for TMDB recommendations API) + # Fetch Exclusion Sets first + watched_imdb, watched_tmdb = await self._get_exclusion_sets() + + # Ensure the source item itself is excluded + if item_id.startswith("tt"): + watched_imdb.add(item_id) + elif item_id.startswith("tmdb:"): + watched_tmdb.add(int(item_id.split(":")[1])) + + # Convert IMDB ID to TMDB ID if item_id.startswith("tt"): tmdb_id, media_type = await self.tmdb_service.find_by_imdb_id(item_id) if not tmdb_id: @@ -143,22 +246,128 @@ async def get_recommendations_for_item(self, item_id: str) -> list[dict]: return [] else: tmdb_id = item_id.split(":")[1] - # Default to movie if we can't determine type from ID - media_type = "movie" + media_type = "movie" # Default - # Safety check if not media_type: media_type = "movie" - # Get recommendations (empty sets mean no library filtering) - recommendations = await self._fetch_recommendations_from_tmdb(str(tmdb_id), media_type, self.per_item_limit) + # Fetch more candidates to account for filtering + # We want 20 final, so fetch 40 + buffer_limit = self.per_item_limit * 2 + recommendations = await self._fetch_recommendations_from_tmdb(str(tmdb_id), media_type, buffer_limit) if not recommendations: - logger.warning(f"No recommendations found for {item_id}") return [] - logger.info(f"Found {len(recommendations)} recommendations for {item_id}") - return await self._fetch_metadata_for_items(recommendations, media_type) + # 1. Filter by TMDB ID + recommendations = await self._filter_candidates(recommendations, watched_imdb, watched_tmdb) + + # 1.5 Filter by Excluded Genres + # We need to detect content_type from item_id or media_type to know which exclusion list to use. + # media_type is already resolved above. + excluded_ids = set(self._get_excluded_genre_ids(media_type)) + + if excluded_ids: + recommendations = [ + item for item in recommendations if not excluded_ids.intersection(item.get("genre_ids") or []) + ] + + # 2. Fetch Metadata (gets IMDB IDs) + meta_items = await self._fetch_metadata_for_items(recommendations, media_type) + + # 3. Strict Filter by IMDB ID (using metadata) + final_items = [] + for item in meta_items: + # check ID (stremio_id) which is usually imdb_id + if item["id"] in watched_imdb: + continue + # check hidden external_ids if available + ext_ids = item.get("_external_ids", {}) + if ext_ids.get("imdb_id") in watched_imdb: + continue + + # Clean up internal fields + item.pop("_external_ids", None) + final_items.append(item) + + if len(final_items) >= self.per_item_limit: + break + + logger.info(f"Found {len(final_items)} valid recommendations for {item_id}") + return final_items + + def _get_excluded_genre_ids(self, content_type: str) -> list[int]: + if not self.user_settings: + return [] + if content_type == "movie": + return [int(g) for g in self.user_settings.excluded_movie_genres] + elif content_type in ["series", "tv"]: + return [int(g) for g in self.user_settings.excluded_series_genres] + return [] + + async def get_recommendations_for_theme(self, theme_id: str, content_type: str, limit: int = 20) -> list[dict]: + """ + Parse a dynamic theme ID and fetch recommendations. + Format: watchly.theme.g[-].k[-].ct... 
+ """ + # Parse params from ID + params = {} + parts = theme_id.replace("watchly.theme.", "").split(".") + + for part in parts: + if part.startswith("g"): + # Genres: g878-53 -> 878,53 + genre_str = part[1:].replace("-", ",") + params["with_genres"] = genre_str.replace(",", "|") + elif part.startswith("k"): + # Keywords: k123-456 + kw_str = part[1:].replace("-", "|") + params["with_keywords"] = kw_str + elif part.startswith("ct"): + # Country: ctUS + params["with_origin_country"] = part[2:] + elif part.startswith("y"): + # Year/Decade: y1990 -> 1990-01-01 to 1999-12-31 + try: + year = int(part[1:]) + params["primary_release_date.gte"] = f"{year}-01-01" + params["primary_release_date.lte"] = f"{year+9}-12-31" + except ValueError: + pass + elif part == "sort-vote": + params["sort_by"] = "vote_average.desc" + params["vote_count.gte"] = 200 + + # Default Sort + if "sort_by" not in params: + params["sort_by"] = "popularity.desc" + + # Apply Excluded Genres + excluded_ids = self._get_excluded_genre_ids(content_type) + if excluded_ids: + params["without_genres"] = "|".join(str(g) for g in excluded_ids) + + # Fetch + recommendations = await self.tmdb_service.get_discover(content_type, **params) + candidates = recommendations.get("results", []) + + # Strict Filtering + watched_imdb, watched_tmdb = await self._get_exclusion_sets() + filtered = await self._filter_candidates(candidates, watched_imdb, watched_tmdb) + + # Meta + meta_items = await self._fetch_metadata_for_items(filtered[: limit * 2], content_type) + + final_items = [] + for item in meta_items: + if item["id"] in watched_imdb: + continue + if item.get("_external_ids", {}).get("imdb_id") in watched_imdb: + continue + item.pop("_external_ids", None) + final_items.append(item) + + return final_items async def _fetch_recommendations_from_tmdb(self, item_id: str, media_type: str, limit: int) -> list[dict]: """ @@ -170,205 +379,219 @@ async def _fetch_recommendations_from_tmdb(self, item_id: str, media_type: str, if item_id.startswith("tt"): tmdb_id, detected_type = await self.tmdb_service.find_by_imdb_id(item_id) if not tmdb_id: - logger.warning(f"No TMDB ID found for {item_id}") return [] if detected_type: media_type = detected_type elif item_id.startswith("tmdb:"): tmdb_id = int(item_id.split(":")[1]) + # Detect media_type if unknown or invalid + if media_type not in ("movie", "tv", "series"): + detected_type = None + try: + details = await self.tmdb_service.get_movie_details(tmdb_id) + if details: + detected_type = "movie" + except Exception: + pass + if not detected_type: + try: + details = await self.tmdb_service.get_tv_details(tmdb_id) + if details: + detected_type = "tv" + except Exception: + pass + if detected_type: + media_type = detected_type else: tmdb_id = item_id - recommendation_response = await self.tmdb_service.get_recommendations(tmdb_id, media_type) + # Normalize series alias + mtype = "tv" if media_type in ("tv", "series") else "movie" + recommendation_response = await self.tmdb_service.get_recommendations(tmdb_id, mtype) recommended_items = recommendation_response.get("results", []) if not recommended_items: return [] - return recommended_items[:limit] + return recommended_items async def get_recommendations( self, content_type: str | None = None, - source_items_limit: int = 2, - recommendations_per_source: int = 5, - max_results: int = 50, - include_watched: bool = False, + source_items_limit: int = 5, + max_results: int = 20, ) -> list[dict]: """ - Get recommendations based on user's Stremio library. - - Process: - 1. 
Get user's loved items from library (these are "source items" we use to find similar content) - 2. If include_watched is True, also include watched items as source items - 3. Get user's watched items (these will be excluded from recommendations) - 4. For each source item, fetch recommendations from TMDB - 5. Filter out items already watched - 6. Aggregate and deduplicate recommendations - 7. Sort by relevance score - 8. Fetch full metadata for final list - - Args: - content_type: "movie" or "series" - source_items_limit: How many items to use as sources (default: 2) - recommendations_per_source: How many recommendations per source item (default: 5) - max_results: Maximum total recommendations to return (default: 50) - include_watched: If True, include watched items as source items in addition to loved items (default: False) + Get Smart Hybrid Recommendations. """ if not content_type: logger.warning("content_type must be specified (movie or series)") return [] - logger.info(f"Getting recommendations for {content_type} (include_watched: {include_watched})") + logger.info(f"Starting Hybrid Recommendation Pipeline for {content_type}") - # Step 1: Fetch user's library items (both watched and loved) + # Step 1: Fetch & Score User Library library_data = await self.stremio_service.get_library_items() - loved_items = library_data.get("loved", []) - watched_items = library_data.get("watched", []) - - # Step 2: Build source items list based on config - if include_watched: - all_source_items = watched_items - logger.info(f"Using watched items ({len(watched_items)}) as sources") - else: - # Only use loved items - all_source_items = loved_items - logger.info(f"Using only loved items ({len(loved_items)}) as sources") - - if not all_source_items: - logger.warning( - f"No {'loved or watched' if include_watched else 'loved'} library items found, returning empty" - " recommendations" - ) - return [] - - # Step 3: Filter source items by content type (only use movies for movie recommendations) - source_items_of_type = [item for item in all_source_items if item.get("type") == content_type] - - if not source_items_of_type: - logger.warning(f"No {content_type} items found in library") - return [] - - # Step 4: Select most recent items as "source items" for finding recommendations - # (These are the items we'll use to find similar content) - # Sort by modification time (most recent first) if available - source_items_of_type.sort(key=lambda x: x.get("_mtime", ""), reverse=True) - source_items = source_items_of_type[:source_items_limit] - logger.info(f"Using {len(source_items)} most recent {content_type} items as sources") - - # Step 4: Build exclusion sets (IMDB IDs and TMDB IDs) for watched items - # We don't want to recommend things the user has already watched - watched_imdb_ids: set[str] = set() - watched_tmdb_ids: set[int] = set() - for item in watched_items: - imdb_id, tmdb_id = _parse_identifier(item.get("_id", "")) - if imdb_id: - watched_imdb_ids.add(imdb_id) - if tmdb_id: - watched_tmdb_ids.add(tmdb_id) - - logger.info(f"Built exclusion sets: {len(watched_imdb_ids)} IMDB IDs, {len(watched_tmdb_ids)} TMDB IDs") - - # Step 5: Process each source item in parallel to get recommendations - # Each source item will generate its own set of recommendations - recommendation_tasks = [ - self._fetch_recommendations_from_tmdb( - source_item.get("_id"), - source_item.get("type"), - recommendations_per_source, + all_items = library_data.get("loved", []) + library_data.get("watched", []) + library_data.get("added", []) + 
logger.info(f"processing {len(all_items)} Items.") + # Cold-start fallback remains (redundant safety) + if not all_items: + all_items = library_data.get("added", []) + + # Build Exclusion Sets explicitly + watched_imdb_ids, watched_tmdb_ids = await self._get_exclusion_sets() + + # Deduplicate and Filter by Type + unique_items = {item["_id"]: item for item in all_items if item.get("type") == content_type} + processed_items = [] + scored_objects = [] + + sorted_history = sorted( + unique_items.values(), key=lambda x: x.get("state", {}).get("lastWatched"), reverse=True + ) + recent_history = sorted_history[:source_items_limit] + + for item_data in recent_history: + scored_obj = self.scoring_service.process_item(item_data) + scored_objects.append(scored_obj) + item_data["_interest_score"] = scored_obj.score + processed_items.append(item_data) + + processed_items.sort(key=lambda x: x["_interest_score"], reverse=True) + top_source_items = processed_items[:source_items_limit] + + # --- Candidate Set A: Item-based Similarity --- + tasks_a = [] + for source in top_source_items: + tasks_a.append(self._fetch_recommendations_from_tmdb(source.get("_id"), source.get("type"), limit=10)) + similarity_candidates = [] + similarity_recommendations = await asyncio.gather(*tasks_a, return_exceptions=True) + + excluded_ids = set(self._get_excluded_genre_ids(content_type)) + + similarity_recommendations = [item for item in similarity_recommendations if not isinstance(item, Exception)] + for batch in similarity_recommendations: + similarity_candidates.extend( + item for item in batch if not excluded_ids.intersection(item.get("genre_ids") or []) ) - for source_item in source_items - ] - all_recommendation_results = await asyncio.gather(*recommendation_tasks, return_exceptions=True) - - # Step 6: Aggregate recommendations from all source items - # Use dictionary to deduplicate by IMDB ID and combine scores - unique_recommendations: dict[str, dict] = {} # Key: IMDB ID, Value: Full recommendation data - - flat_recommendations = [] - for recommendation_batch in all_recommendation_results: - if isinstance(recommendation_batch, Exception): - logger.warning(f"Error processing source item: {recommendation_batch}") - continue - - for recommendation in recommendation_batch: - flat_recommendations.append(recommendation) - - # Step 7: Deduplicate and filter BEFORE fetching full meta - filtered_tmdb_items = [] - seen_tmdb_ids = set() - for item in flat_recommendations: - tmdb_id = item.get("id") - if not tmdb_id or tmdb_id in seen_tmdb_ids or tmdb_id in watched_tmdb_ids: - continue + # --- Candidate Set B: Profile-based Discovery --- + # Extract excluded genres + excluded_genres = list(excluded_ids) # Convert back to list for consistency - # Simple dedupe based on TMDB ID first - seen_tmdb_ids.add(tmdb_id) + # Use typed profile based on content_type + user_profile = await self.user_profile_service.build_user_profile( + scored_objects, content_type=content_type, excluded_genres=excluded_genres + ) + discovery_candidates = await self.discovery_engine.discover_recommendations( + user_profile, content_type, limit=20, excluded_genres=excluded_genres + ) - # We'll do the full scoring logic after fetching meta, but we can prep unique list now - filtered_tmdb_items.append(item) + # --- Combine & Deduplicate --- + candidate_pool = {} # tmdb_id -> item_dict - # Optimization: If we have way too many, cut off early - if len(filtered_tmdb_items) >= max_results * 2: - break + for item in discovery_candidates: + candidate_pool[item["id"]] = 
item - # Step 8: Fetch full metadata - final_recommendations = await self._fetch_metadata_for_items(filtered_tmdb_items, content_type) + for item in similarity_candidates: + # add score to boost similarity candidates + item["_ranked_candidate"] = True + candidate_pool[item["id"]] = item - for meta_data in final_recommendations: - imdb_id = meta_data.get("imdb_id") or meta_data.get("id") + # --- Re-Ranking & Filtering --- + ranked_candidates = [] - # Skip if already watched or no IMDB ID - if not imdb_id or imdb_id in watched_imdb_ids: + for tmdb_id, item in candidate_pool.items(): + # 1. Strict Filter by TMDB ID + if tmdb_id in watched_tmdb_ids or f"tmdb:{tmdb_id}" in watched_imdb_ids: continue - if imdb_id not in unique_recommendations: - # Base score from IMDB rating + sim_score = self.user_profile_service.calculate_similarity(user_profile, item) + vote_average = item.get("vote_average", 0) + popularity = item.get("popularity", 0) + + pop_score = normalize(popularity, 0, 1000) + vote_score = normalize(vote_average, 0, 10) + + final_score = (sim_score * 0.6) + (vote_score * 0.3) + (pop_score * 0.1) + + # Add tiny jitter to promote freshness and avoid static ordering + jitter = random.uniform(-0.02, 0.02) # +/-2% + final_score = final_score * (1 + jitter) + + # Boost candidate if its from tmdb collaborative recommendations + if item.get("_ranked_candidate"): + final_score *= 1.25 + ranked_candidates.append((final_score, item)) + + # Sort by Final Score and cache score on item for diversification + ranked_candidates.sort(key=lambda x: x[0], reverse=True) + for score, item in ranked_candidates: + item["_final_score"] = score + + # Diversify with MMR to avoid shallow, repetitive picks + def _jaccard(a: set, b: set) -> float: + if not a and not b: + return 0.0 + inter = len(a & b) + union = len(a | b) + return inter / union if union else 0.0 + + def _candidate_similarity(x: dict, y: dict) -> float: + gx = set(x.get("genre_ids") or []) + gy = set(y.get("genre_ids") or []) + s = _jaccard(gx, gy) + # Mild penalty if same language to encourage variety + lx = x.get("original_language") + ly = y.get("original_language") + if lx and ly and lx == ly: + s += 0.05 + return min(s, 1.0) + + def _mmr_select(cands: list[dict], k: int, lamb: float = 0.75) -> list[dict]: + selected: list[dict] = [] + remaining = cands[:] + while remaining and len(selected) < k: + if not selected: + best = remaining.pop(0) + selected.append(best) + continue + best_item = None + best_score = float("-inf") + for cand in remaining[:50]: # evaluate a window for speed + rel = cand.get("_final_score", 0.0) + div = 0.0 + for s in selected: + div = max(div, _candidate_similarity(cand, s)) + mmr = lamb * rel - (1 - lamb) * div + if mmr > best_score: + best_score = mmr + best_item = cand + if best_item is None: + break + selected.append(best_item) try: - score = float(meta_data.get("imdbRating", 0)) - except (ValueError, TypeError): - score = 0.0 - meta_data["_score"] = score - unique_recommendations[imdb_id] = meta_data - else: - # Boost score if recommended by multiple source items - existing_recommendation = unique_recommendations[imdb_id] - try: - additional_score = float(meta_data.get("imdbRating", 0)) - except (ValueError, TypeError): - additional_score = 0.0 - existing_recommendation["_score"] = existing_recommendation.get("_score", 0) + additional_score - - # Early exit if we have enough results - if len(unique_recommendations) >= max_results: - break - - # Step 9: Sort by score (higher score = more relevant, appears from more 
sources) - sorted_recommendations = sorted( - unique_recommendations.values(), - key=lambda x: x.get("_score", 0), - reverse=True, - ) - - logger.info(f"Generated {len(sorted_recommendations)} unique recommendations") - return sorted_recommendations + remaining.remove(best_item) + except ValueError: + pass + return selected + + top_ranked_items = [item for _, item in ranked_candidates] + diversified = _mmr_select(top_ranked_items, k=max_results * 2, lamb=0.75) + # Select with buffer for final IMDB filtering after diversification + buffer_selection = diversified + + # Fetch Full Metadata + meta_items = await self._fetch_metadata_for_items(buffer_selection, content_type) + + # Final Strict Filter by IMDB ID + final_items = [] + for item in meta_items: + if item["id"] in watched_imdb_ids: + continue + ext_ids = item.get("_external_ids", {}) + if ext_ids.get("imdb_id") in watched_imdb_ids: + continue - async def get_recommendations_for_genre(self, genre_id: str, media_type: str) -> list[dict]: - """ - Get recommendations for a specific genre. - """ - # parse genre ids first - # remove watchly.genre. prefix - genre_id = genre_id.replace("watchly.genre.", "") - - # genre_id params, replace - with , and _ with | - genre_id_params = genre_id.replace("-", ",").replace("_", "|") - # now call discover api - # get recommendations from tmdb api - recommendations = await self.tmdb_service.get_discover( - media_type=media_type, - with_genres=genre_id_params, - sort_by="popularity.desc", - ) - recommendations = recommendations.get("results", []) + item.pop("_external_ids", None) + final_items.append(item) - return await self._fetch_metadata_for_items(recommendations, media_type) + return final_items diff --git a/app/services/row_generator.py b/app/services/row_generator.py new file mode 100644 index 0000000..250177e --- /dev/null +++ b/app/services/row_generator.py @@ -0,0 +1,172 @@ +import random + +from pydantic import BaseModel + +from app.models.profile import UserTasteProfile +from app.services.gemini import gemini_service +from app.services.tmdb.countries import COUNTRY_ADJECTIVES +from app.services.tmdb.genre import movie_genres, series_genres +from app.services.tmdb_service import TMDBService + + +def normalize_keyword(kw): + return kw.strip().replace("-", " ").replace("_", " ").title() + + +class RowDefinition(BaseModel): + """ + Defines a dynamic catalog row. + """ + + title: str + id: str # Encoded params: watchly.theme.g_k + genres: list[int] = [] + keywords: list[int] = [] + country: str | None = None + year_range: tuple[int, int] | None = None + + @property + def is_valid(self): + return bool(self.genres or self.keywords or self.country or self.year_range) + + +class RowGeneratorService: + """ + Generates aesthetic, personalized row definitions from a User Taste Profile. + """ + + def __init__(self, tmdb_service: TMDBService | None = None): + self.tmdb_service = tmdb_service or TMDBService() + + async def generate_rows(self, profile: UserTasteProfile, content_type: str = "movie") -> list[RowDefinition]: + """ + Generate a diverse set of 3-5 thematic rows. + Async to allow fetching names for keywords. + """ + rows = [] + + # Extract features + top_genres = profile.get_top_genres(limit=3) # [(id, score), ...] + top_keywords = profile.get_top_keywords(limit=4) # [(id, score), ...] 
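# Illustrative only (not part of the diff): for a profile whose top two keywords
# were "time travel" and "dystopia" (hypothetical ids 4379 and 4565), Strategy 1
# below would emit something like:
#     RowDefinition(title="Time Travel Dystopias",
#                   id="watchly.theme.k4379.k4565",
#                   keywords=[4379, 4565])
# with the title coming from Gemini; if Gemini returns nothing, the code falls
# back to a single-keyword row titled with the normalized first keyword name.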
+ top_countries = profile.get_top_countries(limit=1) # [(code, score)] + top_years = profile.years.get_top_features(limit=1) # [(decade_start, score)] + + genre_map = movie_genres if content_type == "movie" else series_genres + + # Helper to get genre name safely + def get_gname(gid): + return genre_map.get(gid, "Movies") + + def get_cname(code): + adjectives = COUNTRY_ADJECTIVES.get(code, []) + if adjectives: + return random.choice(adjectives) + return "" + + # Strategy 1: Combined Keyword Row (Top Priority) + if top_keywords: + k_id1 = top_keywords[0][0] + kw_name1 = await self._get_keyword_name(k_id1) + + use_single_keyword_row = True + if len(top_keywords) >= 2: + k_id2 = top_keywords[1][0] + kw_name2 = await self._get_keyword_name(k_id2) + title = "" + if kw_name1 and kw_name2: + title = await gemini_service.generate_content_async(f"Keywords: {kw_name1} + {kw_name2}") + + if title: + rows.append( + RowDefinition( + title=title, + id=f"watchly.theme.k{k_id1}.k{k_id2}", + keywords=[k_id1, k_id2], + ) + ) + use_single_keyword_row = False + + if use_single_keyword_row and kw_name1: + rows.append( + RowDefinition( + title=normalize_keyword(kw_name1), + id=f"watchly.theme.k{k_id1}", + keywords=[k_id1], + ) + ) + + # Strategy 2: Keyword + Genre (Specific Niche) + if top_genres and len(top_keywords) > 2: + g_id = top_genres[0][0] + # pick a random remaining keyword so the row varies between refreshes + k_id = random.choice(top_keywords[2:])[0] + + if k_id: + kw_name = await self._get_keyword_name(k_id) + if kw_name: + title = await gemini_service.generate_content_async( + f"Genre: {get_gname(g_id)} + Keyword: {normalize_keyword(kw_name)}" + ) + if not title: + title = f"{normalize_keyword(kw_name)} {get_gname(g_id)}" + # keyword and genre names can overlap; drop any duplicated words + title = " ".join(dict.fromkeys(title.split())) + + rows.append( + RowDefinition( + title=title, + id=f"watchly.theme.g{g_id}.k{k_id}", + genres=[g_id], + keywords=[k_id], + ) + ) + + # Strategy 3: Genre + Country (e.g. 
"Bollywood Action") + if top_countries and len(top_genres) > 0: + g_id = top_genres[0][0] if len(top_genres) == 1 else top_genres[1][0] + c_code = top_countries[0][0] + c_adj = get_cname(c_code) + if c_adj: + title = await gemini_service.generate_content_async(f"Genre: {get_gname(g_id)} + Country: {c_adj}") + if not title: + title = f"{c_adj} {get_gname(g_id)}" + rows.append( + RowDefinition( + title=title, + id=f"watchly.theme.g{g_id}.ct{c_code}", # ct for country + genres=[g_id], + country=c_code, + ) + ) + + # Strategy 4: Genre + Era ("90s Action") + if len(top_genres) > 0 and top_years: + # Use 3rd genre if available for diversity, else 1st + g_id = top_genres[0][0] + if len(top_genres) > 2: + g_id = top_genres[2][0] + + decade_start = top_years[0][0] + # # Only do this if decade is valid and somewhat old (nostalgia factor) + if 1970 <= decade_start <= 2010: + decade_str = str(decade_start)[2:] + "s" # "90s" + title = await gemini_service.generate_content_async(f"Genre: {get_gname(g_id)} + Era: {decade_str}") + if not title: + title = f"{decade_str} {get_gname(g_id)}" + rows.append( + RowDefinition( + title=title, + id=f"watchly.theme.g{g_id}.y{decade_start}", + genres=[g_id], + year_range=(decade_start, decade_start + 9), + ) + ) + + return rows + + async def _get_keyword_name(self, keyword_id: int) -> str | None: + try: + data = await self.tmdb_service._make_request(f"/keyword/{keyword_id}") + return data.get("name") + except Exception: + return None diff --git a/app/services/rpdb.py b/app/services/rpdb.py new file mode 100644 index 0000000..8d4e6d6 --- /dev/null +++ b/app/services/rpdb.py @@ -0,0 +1,7 @@ +class RPDBService: + @staticmethod + def get_poster_url(api_key: str, item_id: str) -> str: + """ + Get poster URL for a specific item by IMDB ID. + """ + return f"https://api.ratingposterdb.com/{api_key}/imdb/poster-default/{item_id}.jpg?fallback=true" diff --git a/app/services/scoring.py b/app/services/scoring.py new file mode 100644 index 0000000..c13b80b --- /dev/null +++ b/app/services/scoring.py @@ -0,0 +1,183 @@ +from datetime import datetime, timezone + +from app.models.scoring import ScoredItem, StremioLibraryItem + + +class ScoringService: + """ + Service for calculating user interest scores for library items. + It consumes raw dictionary data or Pydantic models and returns enriched ScoredItems. + """ + + # TODO: Make this a bit more complex based on more parameters. + # Rewatch, How many times? Watched but duration?? What if user stopped watching in middle? + + # Weights for different factors + WEIGHT_WATCH_PERCENTAGE = 0.10 + WEIGHT_REWATCH = 0.17 + WEIGHT_RECENCY = 0.30 + WEIGHT_EXPLICIT_RATING = 0.35 + ADDED_TO_LIBRARY_WEIGHT = 0.08 + + def process_item(self, raw_item: dict) -> ScoredItem: + """ + Process a raw Stremio item dictionary into a ScoredItem. + """ + # Convert dict to Pydantic model for validation and typing + item = StremioLibraryItem(**raw_item) + + score_data = self._calculate_score_components(item) + + return ScoredItem( + item=item, + score=score_data["final_score"], + completion_rate=score_data["completion_rate"], + is_rewatched=score_data["is_rewatched"], + is_recent=score_data["is_recent"], + source_type="loved" if item.is_loved else ("liked" if item.is_liked else "watched"), + ) + + def calculate_score( + self, item: dict | StremioLibraryItem, is_loved: bool = False, is_liked: bool = False + ) -> float: + """ + Backwards compatible method to just get the float score. + Accepts either a raw dict or a StremioLibraryItem. 
+ """ + if isinstance(item, dict): + # Temporarily inject flags if passed separately (legacy support) + if "_is_loved" not in item: + item["_is_loved"] = is_loved + if "_is_liked" not in item: + item["_is_liked"] = is_liked + model_item = StremioLibraryItem(**item) + else: + model_item = item + + return self._calculate_score_components(model_item)["final_score"] + + def _calculate_score_components(self, item: StremioLibraryItem) -> dict: + """Internal logic to calculate score components.""" + state = item.state + + # 1. Completion Score + completion_score = 0.0 + completion_rate = 0.0 + + # Prefer ratio-based completion when duration is available to avoid + # treating short partial plays as full completion just because + # `timesWatched` was incremented. If duration is missing, fall back + # to conservative estimates based on timesWatched/flaggedWatched. + if state.duration and state.duration > 0: + try: + ratio = min(float(state.timeWatched) / float(state.duration), 1.0) + except Exception: + ratio = 0.0 + completion_rate = ratio + completion_score = ratio * 100.0 + + # If the item was explicitly marked watched or has timesWatched but + # the observed ratio is very small, give a modest boost (not full 100). + if (state.timesWatched > 0 or state.flaggedWatched > 0) and completion_score < 50.0: + completion_score = max(completion_score, 50.0) + completion_rate = max(completion_rate, 0.5) + elif state.timesWatched > 0 or state.flaggedWatched > 0: + # No duration information: use a conservative assumed completion. + completion_score = 80.0 + completion_rate = 0.8 + + # 2. Rewatch Bonus + # We compute rewatch strength using two complementary metrics: + # - times_based: how many extra explicit watches the user has (timesWatched - 1) + # - ratio_based: overallTimeWatched / duration measures how many full-length equivalents + # If duration is missing we fall back to conservative estimators to avoid false positives. + rewatch_score = 0.0 + is_rewatched = False + if state.timesWatched > 1 and not state.flaggedWatched: + is_rewatched = True + + # times-based component (each extra watch gives a boost) + times_component = (state.timesWatched - 1) * 50 + + # ratio-based component: how many full durations the user has watched in total + ratio_component = 0.0 + try: + overall_timewatched = float(state.overallTimeWatched or 0) + duration = float(state.duration or 0) + if duration > 0 and overall_timewatched > 0: + watch_ratio = overall_timewatched / duration + ratio_component = max((watch_ratio - 1.0) * 100.0, 0.0) + else: + # If duration is missing, be conservative: estimate based on timeWatched + # If timeWatched exists, assume it approximates one viewing; otherwise use timesWatched + time_watched = float(state.timeWatched or 0) + if time_watched > 0: + # assume a single-view baseline equal to time_watched, so overall/time_watched ~= times + ratio_est = ( + overall_timewatched / time_watched if time_watched > 0 else float(state.timesWatched) + ) + ratio_component = max((ratio_est - 1.0) * 100.0, 0.0) + else: + ratio_component = max((float(state.timesWatched) - 1.0) * 20.0, 0.0) + except Exception: + ratio_component = 0.0 + + # Combine components but clamp to reasonable bounds + combined = max(times_component, ratio_component) + rewatch_score = min(combined, 100.0) + + # 3. 
Recency Score + recency_score = 0.0 + is_recent = False + if state.lastWatched: + now = datetime.now(timezone.utc) + # Ensure timezone awareness + last_watched = state.lastWatched + if last_watched.tzinfo is None: + last_watched = last_watched.replace(tzinfo=timezone.utc) + + days_since = (now - last_watched).days + + if days_since < 7: + recency_score = 200 + is_recent = True + elif days_since < 30: + recency_score = 100 + is_recent = True + elif days_since < 90: + recency_score = 70 + elif days_since < 180: + recency_score = 30 + elif days_since < 365: + recency_score = 10 + + # 4. Explicit Rating Score + rating_score = 0.0 + if item.is_loved: + rating_score = 100.0 + elif item.is_liked: + rating_score = 70.0 + + # 5. Added to Library Score + added_to_library_score = 0.0 + if not item.temp and not item.removed: + added_to_library_score = 100.0 + # if item.removed: + # # should we penalize for removed items? + # added_to_library_score = -50.0 + + # Calculate Final Score + final_score = ( + (completion_score * self.WEIGHT_WATCH_PERCENTAGE) + + (rewatch_score * self.WEIGHT_REWATCH) + + (recency_score * self.WEIGHT_RECENCY) + + (rating_score * self.WEIGHT_EXPLICIT_RATING) + + (added_to_library_score * self.ADDED_TO_LIBRARY_WEIGHT) + ) + + return { + "final_score": min(max(final_score, 0), 100), + "completion_rate": completion_rate, + "is_rewatched": is_rewatched, + "is_recent": is_recent, + } diff --git a/app/services/stremio_service.py b/app/services/stremio_service.py index e828023..1bee937 100644 --- a/app/services/stremio_service.py +++ b/app/services/stremio_service.py @@ -1,16 +1,32 @@ import asyncio +import random +from urllib.parse import urlparse import httpx +from async_lru import alru_cache from loguru import logger from app.core.config import settings BASE_CATALOGS = [ - {"type": "movie", "id": "watchly.rec", "name": "Recommended", "extra": []}, - {"type": "series", "id": "watchly.rec", "name": "Recommended", "extra": []}, + {"type": "movie", "id": "watchly.rec", "name": "Top Picks for You", "extra": []}, + {"type": "series", "id": "watchly.rec", "name": "Top Picks for You", "extra": []}, ] +def match_hostname(url: str, hostname: str) -> bool: + """Return True if the URL host matches the target host (scheme-agnostic). + + Accepts `hostname` as either a naked host (example.com) or full URL (https://example.com). 
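+
+    Illustrative (hypothetical hosts; both calls return True, and the host
+    comparison is case-insensitive):
+        match_hostname("https://addon.example.com/u123/manifest.json", "addon.example.com")
+        match_hostname("https://addon.example.com/manifest.json", "https://addon.example.com")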
+ """ + try: + url_host = urlparse(url if "://" in url else f"https://{url}").hostname + target_host = urlparse(hostname if "://" in hostname else f"https://{hostname}").hostname + return bool(url_host and target_host and url_host.lower() == target_host.lower()) + except Exception: + return False + + class StremioService: """Service for interacting with Stremio API to fetch user library.""" @@ -34,8 +50,13 @@ async def _get_client(self) -> httpx.AsyncClient: """Get or create the main Stremio API client.""" if self._client is None: self._client = httpx.AsyncClient( - timeout=30.0, + timeout=10.0, limits=httpx.Limits(max_keepalive_connections=10, max_connections=50), + http2=True, + headers={ + "User-Agent": "Watchly/Client", + "Accept": "application/json", + }, ) return self._client @@ -43,8 +64,13 @@ async def _get_likes_client(self) -> httpx.AsyncClient: """Get or create the likes API client.""" if self._likes_client is None: self._likes_client = httpx.AsyncClient( - timeout=30.0, + timeout=10.0, limits=httpx.Limits(max_keepalive_connections=10, max_connections=50), + http2=True, + headers={ + "User-Agent": "Watchly/Client", + "Accept": "application/json", + }, ) return self._likes_client @@ -71,9 +97,8 @@ async def _login_for_auth_key(self) -> str: try: client = await self._get_client() - result = await client.post(url, json=payload) - result.raise_for_status() - data = result.json() + result = await self._post_with_retries(client, url, json=payload) + data = result auth_key = data.get("result", {}).get("authKey", "") if auth_key: logger.info("Successfully authenticated with Stremio") @@ -93,18 +118,18 @@ async def _login_for_auth_key(self) -> str: raise async def get_auth_key(self) -> str: - """Return a cached auth key or login to retrieve one.""" - if self._auth_key: - return self._auth_key - auth_key = await self._login_for_auth_key() - if not auth_key: - raise ValueError("Failed to obtain Stremio auth key") - return auth_key - - async def is_loved(self, auth_key: str, imdb_id: str, media_type: str) -> bool: - """Check if user has loved a movie or series.""" + """Return the cached auth key.""" + if not self._auth_key: + raise ValueError("Stremio auth key is missing.") + return self._auth_key + + async def is_loved(self, auth_key: str, imdb_id: str, media_type: str) -> tuple[bool, bool]: + """ + Check if user has loved or liked a movie or series. 
+ Returns: (is_loved, is_liked) + """ if not imdb_id.startswith("tt"): - return False + return False, False url = "https://likes.stremio.com/api/get_status" params = { "authToken": auth_key, @@ -114,27 +139,86 @@ async def is_loved(self, auth_key: str, imdb_id: str, media_type: str) -> bool: try: client = await self._get_likes_client() - result = await client.get(url, params=params) - result.raise_for_status() - status = result.json().get("status", "") - if status and status.lower() == "loved": - return True - else: - return False + result = await self._get_with_retries(client, url, params=params) + status = result.get("status", "") + return (status == "loved", status == "liked") except Exception as e: logger.error( f"Error checking if user has loved a movie or series: {e}", exc_info=True, ) - return False + return False, False + + @alru_cache(maxsize=1000, ttl=3600) + async def get_loved_items(self, auth_token: str, media_type: str) -> list[str]: + url = f"https://likes.stremio.com/addons/loved/movies-shows/{auth_token}/catalog/{media_type}/stremio-loved-{media_type.lower()}.json" # noqa + try: + client = await self._get_likes_client() + data = await self._get_with_retries(client, url) + metas = data.get("metas", []) + return [meta.get("id") for meta in metas] + except Exception as e: + logger.warning(f"Failed to fetch loved items: {e}") + return [] + + @alru_cache(maxsize=1000, ttl=3600) + async def get_liked_items(self, auth_token: str, media_type: str) -> list[str]: + url = f"https://likes.stremio.com/addons/liked/movies-shows/{auth_token}/catalog/{media_type}/stremio-liked-{media_type.lower()}.json" # noqa + try: + client = await self._get_likes_client() + data = await self._get_with_retries(client, url) + metas = data.get("metas", []) + return [meta.get("id") for meta in metas] + except Exception as e: + logger.warning(f"Failed to fetch liked items: {e}") + return [] + + async def get_user_info(self) -> dict[str, str]: + """Fetch user ID and email using the auth key.""" + if not self._auth_key: + raise ValueError("Stremio auth key is missing.") + + url = f"{self.base_url}/api/getUser" + payload = { + "type": "GetUser", + "authKey": self._auth_key, + } + + try: + client = await self._get_client() + data = await self._post_with_retries(client, url, json=payload) + + if "error" in data: + error_msg = data["error"] + if isinstance(error_msg, dict): + error_msg = error_msg.get("message", "Unknown error") + raise ValueError(f"Stremio Error: {error_msg}") + + # Structure: { result: { _id, email, ... } } + res = data.get("result", {}) + user_id = res.get("_id", "") + email = res.get("email", "") + + if not user_id: + raise ValueError("Could not retrieve user ID from Stremio profile.") + + return {"user_id": user_id, "email": email} + except Exception as e: + logger.error(f"Error fetching user profile: {e}") + raise + + async def get_user_email(self) -> str: + """Fetch user email using the auth key.""" + user_info = await self.get_user_info() + return user_info.get("email", "") async def get_library_items(self) -> dict[str, list[dict]]: """ Fetch library items from Stremio once and return both watched and loved items. Returns a dict with 'watched' and 'loved' keys. 
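+        With this change the result also includes 'added' (in-library, unwatched,
+        not liked or loved) and 'removed' lists, four keys in total.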
""" - if not self._auth_key and (not self.username or not self.password): - logger.warning("Stremio credentials not configured") + if not self._auth_key: + logger.warning("Stremio auth key not configured") return {"watched": [], "loved": []} try: @@ -153,96 +237,94 @@ async def get_library_items(self) -> dict[str, list[dict]]: } client = await self._get_client() - result = await client.post(url, json=payload) - result.raise_for_status() - items = result.json().get("result", []) + data = await self._post_with_retries(client, url, json=payload) + items = data.get("result", []) logger.info(f"Fetched {len(items)} library items from Stremio") - # Filter only items that user has watched - watched_items = [ - item - for item in items - if ( - item.get("state", {}).get("timesWatched", 0) > 0 - and item.get("type") in ["movie", "series"] - and item.get("_id").startswith("tt") - ) - ] + # Filter items considered watched: explicit timesWatched/flaggedWatched OR high completion ratio + watched_items = [] + for item in items: + if item.get("type") not in ["movie", "series"]: + continue + item_id = item.get("_id", "") + if not item_id.startswith("tt"): + continue + state = item.get("state", {}) or {} + times_watched = int(state.get("timesWatched") or 0) + flagged_watched = int(state.get("flaggedWatched") or 0) + duration = int(state.get("duration") or 0) + time_watched = int(state.get("timeWatched") or 0) + ratio_ok = duration > 0 and (time_watched / duration) >= 0.7 + if times_watched > 0 or flagged_watched > 0 or ratio_ok: + watched_items.append(item) logger.info(f"Filtered {len(watched_items)} watched library items") - # Sort watched items by watched time (most recent first) - watched_items.sort(key=lambda x: x.get("state", {}).get("lastWatched", ""), reverse=True) + # Sort watched items by lastWatched, fallback to _mtime (most recent first) + def _sort_key(x: dict): + state = x.get("state", {}) or {} + return ( + str(state.get("lastWatched") or ""), + str(x.get("_mtime") or ""), + ) + + watched_items.sort(key=_sort_key, reverse=True) - # is_loved only until we find 10 movies and 10 series loved_items = [] - movies_found = 0 - series_found = 0 - target_count = settings.RECOMMENDATION_SOURCE_ITEMS_LIMIT - batch_size = 20 - - # Process in batches to stop early - for i in range(0, len(watched_items), batch_size): - if movies_found >= target_count and series_found >= target_count: - logger.info("Found enough loved items, stopping check") - break - - batch = watched_items[i : i + batch_size] # noqa: E203 - - # Filter batch to only check types we still need - check_candidates = [] - for item in batch: - itype = item.get("type") - if itype == "movie" and movies_found < target_count: - check_candidates.append(item) - elif itype == "series" and series_found < target_count: - check_candidates.append(item) - - if not check_candidates: - continue + added_items = [] + removed_items = [] - # Check loved status for candidates in parallel - loved_statuses = await asyncio.gather( - *[self.is_loved(auth_key, item.get("_id"), item.get("type")) for item in check_candidates] - ) + # fetch loved and liked items - # Process results - for item, is_loved_status in zip(check_candidates, loved_statuses): - if is_loved_status: - loved_items.append(item) - if item.get("type") == "movie": - movies_found += 1 - elif item.get("type") == "series": - series_found += 1 - - logger.info( - f"Found {len(loved_items)} loved library items (Movies: {movies_found}, Series: {series_found})" + loved_movies, loved_series, liked_movies, 
liked_series = await asyncio.gather( + self.get_loved_items(auth_key, "movie"), + self.get_loved_items(auth_key, "series"), + self.get_liked_items(auth_key, "movie"), + self.get_liked_items(auth_key, "series"), ) - # Format watched items - formatted_watched = [] + watched_ids = {i.get("_id") for i in watched_items} + for item in watched_items: - formatted_watched.append( - { - "type": item.get("type"), - "_id": item.get("_id"), - "_mtime": item.get("state", {}).get("lastWatched", ""), - "name": item.get("name"), - } - ) + loved = False + if item.get("_id") in loved_movies or item.get("_id") in loved_series: + item["_is_loved"] = True + loved = True + if item.get("_id") in liked_movies or item.get("_id") in liked_series: + item["_is_liked"] = True + loved = True + + if loved: + loved_items.append(item) + + logger.info(f"Found {len(loved_items)} loved library items") + + # Build added-only items: in library, type movie/series, imdb id, not watched, not loved/liked + for item in items: + if item.get("type") not in ["movie", "series"]: + continue + iid = item.get("_id", "") + if not iid.startswith("tt"): + continue + if iid in watched_ids: + continue + if iid in loved_movies or iid in loved_series or iid in liked_movies or iid in liked_series: + continue + if item.get("temp"): + continue + if item.get("removed"): + removed_items.append(item) + continue - # Format loved items (they are already somewhat sorted by discovery order, which aligns with mtime) - formatted_loved = [] - for item in loved_items: - formatted_loved.append( - { - "type": item.get("type"), - "_id": item.get("_id"), - "_mtime": item.get("state", {}).get("lastWatched", ""), - "name": item.get("name"), - } - ) + added_items.append(item) - return {"watched": formatted_watched, "loved": formatted_loved} + logger.info(f"Found {len(added_items)} added (unwatched) and {len(removed_items)} removed library items") + # Return raw items; ScoringService will handle Pydantic conversion + return { + "watched": watched_items, + "loved": loved_items, + "added": added_items, + "removed": removed_items, + } except Exception as e: logger.error(f"Error fetching library items: {e}", exc_info=True) return {"watched": [], "loved": []} @@ -256,9 +338,7 @@ async def get_addons(self, auth_key: str | None = None) -> list[dict]: "update": True, } client = await self._get_client() - result = await client.post(url, json=payload) - result.raise_for_status() - data = result.json() + data = await self._post_with_retries(client, url, json=payload) error_payload = data.get("error") if not error_payload and (data.get("code") and data.get("message")): error_payload = data @@ -285,10 +365,9 @@ async def update_addon(self, addons: list[dict], auth_key: str | None = None): } client = await self._get_client() - result = await client.post(url, json=payload) - result.raise_for_status() + data = await self._post_with_retries(client, url, json=payload) logger.info("Updated addons") - return result.json().get("result", {}).get("success", False) + return data.get("result", {}).get("success", False) async def update_catalogs(self, catalogs: list[dict], auth_key: str | None = None): auth_key = auth_key or await self.get_auth_key() @@ -297,7 +376,9 @@ async def update_catalogs(self, catalogs: list[dict], auth_key: str | None = Non logger.info(f"Found {len(addons)} addons") # find addon with id "com.watchly" for addon in addons: - if addon.get("manifest", {}).get("id") == settings.ADDON_ID: + if addon.get("manifest", {}).get("id") == settings.ADDON_ID and match_hostname( + 
addon.get("transportUrl"), settings.HOST_NAME + ): logger.info(f"Found addon with id {settings.ADDON_ID}") addon["manifest"]["catalogs"] = catalogs break @@ -307,6 +388,74 @@ async def is_addon_installed(self, auth_key: str | None = None): auth_key = auth_key or await self.get_auth_key() addons = await self.get_addons(auth_key) for addon in addons: - if addon.get("manifest", {}).get("id") == settings.ADDON_ID: + if addon.get("manifest", {}).get("id") == settings.ADDON_ID and match_hostname( + addon.get("transportUrl"), settings.HOST_NAME + ): return True return False + + async def _post_with_retries(self, client: httpx.AsyncClient, url: str, json: dict, max_tries: int = 3) -> dict: + attempts = 0 + last_exc: Exception | None = None + while attempts < max_tries: + try: + resp = await client.post(url, json=json) + resp.raise_for_status() + return resp.json() + except httpx.HTTPStatusError as e: + status = e.response.status_code + if status == 429 or 500 <= status < 600: + attempts += 1 + backoff = (2 ** (attempts - 1)) + random.uniform(0, 0.25) + logger.warning( + f"Stremio POST {url} failed with {status}; retry {attempts}/{max_tries} in" f" {backoff:.2f}s" + ) + await asyncio.sleep(backoff) + last_exc = e + continue + raise + except httpx.RequestError as e: + attempts += 1 + backoff = (2 ** (attempts - 1)) + random.uniform(0, 0.25) + logger.warning( + f"Stremio POST {url} request error: {e}; retry {attempts}/{max_tries} in {backoff:.2f}s" + ) + await asyncio.sleep(backoff) + last_exc = e + continue + if last_exc: + raise last_exc + return {} + + async def _get_with_retries( + self, client: httpx.AsyncClient, url: str, params: dict | None = None, max_tries: int = 3 + ) -> dict: + attempts = 0 + last_exc: Exception | None = None + while attempts < max_tries: + try: + resp = await client.get(url, params=params) + resp.raise_for_status() + return resp.json() + except httpx.HTTPStatusError as e: + status = e.response.status_code + if status == 429 or 500 <= status < 600: + attempts += 1 + backoff = (2 ** (attempts - 1)) + random.uniform(0, 0.25) + logger.warning( + f"Stremio GET {url} failed with {status}; retry {attempts}/{max_tries} in" f" {backoff:.2f}s" + ) + await asyncio.sleep(backoff) + last_exc = e + continue + raise + except httpx.RequestError as e: + attempts += 1 + backoff = (2 ** (attempts - 1)) + random.uniform(0, 0.25) + logger.warning(f"Stremio GET {url} request error: {e}; retry {attempts}/{max_tries} in {backoff:.2f}s") + await asyncio.sleep(backoff) + last_exc = e + continue + if last_exc: + raise last_exc + return {} diff --git a/app/services/tmdb/countries.py b/app/services/tmdb/countries.py new file mode 100644 index 0000000..a0fb9f9 --- /dev/null +++ b/app/services/tmdb/countries.py @@ -0,0 +1,240 @@ +# Catchy adjectives for countries to make titles more engaging +COUNTRY_ADJECTIVES = { + # Major Film-Producing Countries + "US": ["American", "Hollywood"], + "GB": ["British", "English"], + "FR": ["French", "Français", "Parisian"], + "DE": ["German", "Deutsch", "Berlin"], + "JP": ["Japanese", "Nippon", "Tokyo"], + "KR": ["Korean", "K-Drama"], + "IN": ["Indian", "Bollywood"], + "CN": ["Chinese", "Mandarin"], + "ES": ["Spanish", "Español"], + "IT": ["Italian", "Italiano"], + "CA": ["Canadian", "Maple Leaf"], + "AU": ["Australian", "Down Under"], + "HK": ["Hong Kong", "Cantonese"], + "TW": ["Taiwanese", "Formosan"], + "RU": ["Russian", "Soviet"], + "BR": ["Brazilian", "Samba"], + "MX": ["Mexican", "Latino"], + "SE": ["Swedish", "Nordic"], + "DK": ["Danish", "Nordic"], + "NO": 
["Norwegian", "Nordic"], + "FI": ["Finnish", "Nordic"], + "NL": ["Dutch", "Netherlands"], + "BE": ["Belgian", "Flemish"], + "PL": ["Polish", "Warsaw"], + "CZ": ["Czech", "Prague"], + "GR": ["Greek", "Athenian"], + "TR": ["Turkish", "Istanbul"], + "TH": ["Thai", "Bangkok", "Southeast Asian"], + "PH": ["Filipino"], + "ID": ["Indonesian", "Southeast Asian"], + "MY": ["Malaysian", "Southeast Asian"], + "SG": ["Singaporean", "Lion City", "Southeast Asian"], + "VN": ["Vietnamese", "Southeast Asian"], + "AR": ["Argentine", "Tango"], + "CL": ["Chilean", "Andean"], + "CO": ["Colombian", "Latin"], + "PE": ["Peruvian", "Andean"], + "ZA": ["South African", "African"], + "EG": ["Egyptian", "Nile"], + "NG": ["Nigerian", "Nollywood"], + "IE": ["Irish", "Celtic"], + "NZ": ["New Zealand", "Kiwi", "Aotearoa"], + "IS": ["Icelandic", "Nordic"], + "AT": ["Austrian", "Alpine"], + "CH": ["Swiss", "Alpine"], + "PT": ["Portuguese", "Iberian"], + "RO": ["Romanian", "Eastern European"], + "HU": ["Hungarian", "Central European"], + "BG": ["Bulgarian", "Balkan"], + "RS": ["Serbian", "Balkan"], + "HR": ["Croatian", "Balkan"], + "SI": ["Slovenian", "Balkan"], + "SK": ["Slovak", "Bratislava", "Central European"], + "IL": ["Israeli", "Middle Eastern"], + "IR": ["Iranian", "Persian"], + "SA": ["Saudi", "Arabian"], + "AE": ["Emirati", "Arabian"], + "PK": ["Pakistani", "South Asian"], + "BD": ["Bangladeshi", "South Asian"], + "LK": ["Sri Lankan", "South Asian"], + "NP": ["Nepalese", "Nepali"], + "MM": ["Myanmar", "Burmese"], + "KH": ["Cambodian", "Southeast Asian"], + "LA": ["Laotian", "Southeast Asian"], + # Additional countries with catchy adjectives + "AD": ["Andorran"], + "AF": ["Afghan"], + "AG": ["Antiguan"], + "AI": ["Anguillan"], + "AL": ["Albanian"], + "AM": ["Armenian"], + "AO": ["Angolan"], + "AQ": ["Antarctic"], + "AS": ["Samoan"], + "AW": ["Aruban"], + "AZ": ["Azerbaijani"], + "BA": ["Bosnian"], + "BB": ["Barbadian"], + "BF": ["Burkinabé"], + "BH": ["Bahraini"], + "BI": ["Burundian"], + "BJ": ["Beninese"], + "BM": ["Bermudian"], + "BN": ["Bruneian"], + "BO": ["Bolivian"], + "BS": ["Bahamian"], + "BT": ["Bhutanese"], + "BW": ["Botswanan"], + "BY": ["Belarusian"], + "BZ": ["Belizean"], + "CC": ["Cocos Islander"], + "CD": ["Congolese"], + "CF": ["Central African"], + "CG": ["Congolese"], + "CI": ["Ivorian"], + "CK": ["Cook Islander"], + "CM": ["Cameroonian"], + "CR": ["Costa Rican"], + "CU": ["Cuban"], + "CV": ["Cape Verdean"], + "CY": ["Cypriot"], + "DJ": ["Djiboutian"], + "DM": ["Dominican"], + "DO": ["Dominican"], + "DZ": ["Algerian"], + "EC": ["Ecuadorian"], + "EE": ["Estonian"], + "EH": ["Sahrawi"], + "ER": ["Eritrean"], + "ET": ["Ethiopian"], + "FJ": ["Fijian"], + "FK": ["Falkland"], + "FM": ["Micronesian"], + "FO": ["Faroese"], + "GA": ["Gabonese"], + "GD": ["Grenadian"], + "GE": ["Georgian"], + "GF": ["French Guianese"], + "GH": ["Ghanaian"], + "GI": ["Gibraltarian"], + "GL": ["Greenlandic"], + "GM": ["Gambian"], + "GN": ["Guinean"], + "GP": ["Guadeloupean"], + "GQ": ["Equatorial Guinean"], + "GS": ["South Georgian"], + "GT": ["Guatemalan"], + "GU": ["Guamanian"], + "GW": ["Guinea-Bissauan"], + "GY": ["Guyanese"], + "HM": ["Heard Islander"], + "HN": ["Honduran"], + "HT": ["Haitian"], + "IO": ["British Indian Ocean"], + "IQ": ["Iraqi"], + "JM": ["Jamaican"], + "JO": ["Jordanian"], + "KE": ["Kenyan"], + "KG": ["Kyrgyz"], + "KI": ["Kiribati"], + "KM": ["Comoran"], + "KN": ["Kittitian"], + "KP": ["North Korean"], + "KW": ["Kuwaiti"], + "KY": ["Caymanian"], + "KZ": ["Kazakh"], + "LB": ["Lebanese"], + "LC": 
["Saint Lucian"], + "LI": ["Liechtensteiner"], + "LR": ["Liberian"], + "LS": ["Basotho"], + "LT": ["Lithuanian"], + "LU": ["Luxembourgish"], + "LV": ["Latvian"], + "LY": ["Libyan"], + "MA": ["Moroccan"], + "MC": ["Monacan"], + "MD": ["Moldovan"], + "ME": ["Montenegrin"], + "MG": ["Malagasy"], + "MH": ["Marshallese"], + "MK": ["Macedonian"], + "ML": ["Malian"], + "MN": ["Mongolian"], + "MO": ["Macanese"], + "MP": ["Northern Mariana"], + "MQ": ["Martiniquais"], + "MR": ["Mauritanian"], + "MS": ["Montserratian"], + "MT": ["Maltese"], + "MU": ["Mauritian"], + "MV": ["Maldivian"], + "MW": ["Malawian"], + "MZ": ["Mozambican"], + "NA": ["Namibian"], + "NC": ["New Caledonian"], + "NE": ["Nigerien"], + "NF": ["Norfolk Islander"], + "NI": ["Nicaraguan"], + "OM": ["Omani"], + "PA": ["Panamanian"], + "PF": ["French Polynesian"], + "PG": ["Papua New Guinean"], + "PM": ["Saint-Pierrais"], + "PN": ["Pitcairn"], + "PR": ["Puerto Rican"], + "PS": ["Palestinian"], + "PW": ["Palauan"], + "PY": ["Paraguayan"], + "QA": ["Qatari"], + "RE": ["Réunionnais"], + "RW": ["Rwandan"], + "SB": ["Solomon Islander"], + "SC": ["Seychellois"], + "SD": ["Sudanese"], + "SH": ["Saint Helenian"], + "SJ": ["Svalbard"], + "SL": ["Sierra Leonean"], + "SM": ["Sammarinese"], + "SN": ["Senegalese"], + "SO": ["Somali"], + "SR": ["Surinamese"], + "SS": ["South Sudanese"], + "ST": ["São Toméan"], + "SV": ["Salvadoran"], + "SY": ["Syrian"], + "SZ": ["Swazi"], + "TC": ["Turks and Caicos"], + "TD": ["Chadian"], + "TG": ["Togolese"], + "TJ": ["Tajik"], + "TK": ["Tokelauan"], + "TL": ["Timorese"], + "TM": ["Turkmen"], + "TN": ["Tunisian"], + "TO": ["Tongan"], + "TT": ["Trinidadian"], + "TV": ["Tuvaluan"], + "TZ": ["Tanzanian"], + "UA": ["Ukrainian"], + "UG": ["Ugandan"], + "UM": ["US Outlying"], + "UY": ["Uruguayan"], + "UZ": ["Uzbek"], + "VA": ["Vatican"], + "VC": ["Saint Vincentian"], + "VE": ["Venezuelan"], + "VG": ["British Virgin Islander"], + "VI": ["US Virgin Islander"], + "VU": ["Vanuatuan"], + "WF": ["Wallisian"], + "WS": ["Samoan"], + "YE": ["Yemeni"], + "YT": ["Mahoran"], + "ZM": ["Zambian"], + "ZW": ["Zimbabwean"], +} diff --git a/app/services/tmdb/genre.py b/app/services/tmdb/genre.py index 09cc163..e42eb93 100644 --- a/app/services/tmdb/genre.py +++ b/app/services/tmdb/genre.py @@ -43,3 +43,37 @@ MOVIE_GENRE_TO_ID_MAP = {genre: id for id, genre in movie_genres.items()} SERIES_GENRE_TO_ID_MAP = {genre: id for id, genre in series_genres.items()} + + +# Adjectives to spice up titles based on genres +GENRE_ADJECTIVES = { + # Movie Genres + 28: ["Adrenaline-Pumping", "Explosive", "Hard-Hitting"], # Action + 12: ["Epic", "Globe-Trotting", "Daring"], # Adventure + 16: ["Vibrant", "Imaginative", "Visually Stunning"], # Animation + 35: ["Laugh-Out-Loud", "Witty", "Feel-Good"], # Comedy + 80: ["Gritty", "Noir", "Underworld"], # Crime + 99: ["Eye-Opening", "Compelling", "Real-Life"], # Documentary + 18: ["Critically Acclaimed", "Powerful", "Emotional"], # Drama + 10751: ["Wholesome", "Heartfelt", "Family-Favorite"], # Family + 14: ["Magical", "Otherworldly", "Enchanting"], # Fantasy + 36: ["Timeless", "Legendary", "Historic"], # History + 27: ["Bone-Chilling", "Nightmarish", "Terrifying"], # Horror + 10402: ["Melodic", "Rhythmic", "Musical"], # Music + 9648: ["Mysterious", "Puzzle-Box", "Twisted"], # Mystery + 10749: ["Heartwarming", "Passionate", "Bittersweet"], # Romance + 878: ["Mind-Bending", "Futuristic", "Dystopian"], # Science Fiction + 10770: ["Exclusive", "Feature-Length", "Made-for-TV"], # TV Movie + 53: ["Edge-of-your-Seat", 
"Suspenseful", "Slow-Burn"], # Thriller + 10752: ["Intense", "Heroic", "Battle-Hardened"], # War + 37: ["Lawless", "Gunslinging", "Wild West"], # Western + # TV Specific Genres + 10759: ["Action-Packed", "High-Stakes", "Daring"], # Action & Adventure + 10762: ["Fun-Filled", "Playful", "Educational"], # Kids + 10763: ["In-Depth", "Current", "Breaking"], # News + 10764: ["Unscripted", "Dramatic", "Binge-Worthy"], # Reality + 10765: ["Fantastical", "Sci-Fi", "Supernatural"], # Sci-Fi & Fantasy + 10766: ["Scandalous", "Dramatic", "Emotional"], # Soap + 10767: ["Conversational", "Insightful", "Engaging"], # Talk + 10768: ["Political", "Strategic", "Controversial"], # War & Politics +} diff --git a/app/services/tmdb_service.py b/app/services/tmdb_service.py index d8c6eaa..1ea9a5c 100644 --- a/app/services/tmdb_service.py +++ b/app/services/tmdb_service.py @@ -1,16 +1,21 @@ +import asyncio +import random + import httpx from async_lru import alru_cache from loguru import logger from app.core.config import settings +from app.core.version import __version__ class TMDBService: """Service for interacting with The Movie Database (TMDB) API.""" - def __init__(self): + def __init__(self, language: str = "en-US"): self.api_key = settings.TMDB_API_KEY self.base_url = "https://api.themoviedb.org/3" + self.language = language # Reuse HTTP client for connection pooling and better performance self._client: httpx.AsyncClient | None = None if not self.api_key: @@ -22,6 +27,11 @@ async def _get_client(self) -> httpx.AsyncClient: self._client = httpx.AsyncClient( timeout=10.0, limits=httpx.Limits(max_keepalive_connections=20, max_connections=100), + http2=True, + headers={ + "User-Agent": f"Watchly/{__version__} (+https://github.com/TimilsinaBimal/Watchly)", + "Accept": "application/json", + }, ) return self._client @@ -36,32 +46,54 @@ async def _make_request(self, endpoint: str, params: dict | None = None) -> dict if not self.api_key: raise RuntimeError("TMDB_API_KEY is not configured. Set the environment variable to enable TMDB requests.") url = f"{self.base_url}{endpoint}" - default_params = {"api_key": self.api_key, "language": "en-US"} + default_params = {"api_key": self.api_key, "language": self.language} if params: default_params.update(params) - try: - client = await self._get_client() - response = await client.get(url, params=default_params) - response.raise_for_status() - - # Check if response has content - if not response.text: - logger.warning(f"TMDB API returned empty response for {endpoint}") - return {} - + attempts = 0 + last_exc: Exception | None = None + while attempts < 3: try: - return response.json() - except ValueError as e: - logger.error(f"TMDB API returned invalid JSON for {endpoint}: {e}. Response: {response.text[:200]}") - return {} - except httpx.HTTPStatusError as e: - logger.error(f"TMDB API error for {endpoint}: {e.response.status_code} - {e.response.text[:200]}") - raise - except httpx.RequestError as e: - logger.error(f"TMDB API request error for {endpoint}: {e}") - raise + client = await self._get_client() + response = await client.get(url, params=default_params) + response.raise_for_status() + + if not response.text: + logger.warning(f"TMDB API returned empty response for {endpoint}") + return {} + + try: + return response.json() + except ValueError as e: + logger.error( + f"TMDB API returned invalid JSON for {endpoint}: {e}. 
Response: {response.text[:200]}" + ) + return {} + except httpx.HTTPStatusError as e: + status = e.response.status_code + # Retry on 429 or 5xx + if status == 429 or 500 <= status < 600: + attempts += 1 + backoff = (2 ** (attempts - 1)) + random.uniform(0, 0.25) + logger.warning(f"TMDB {endpoint} failed with {status}; retry {attempts}/3 in {backoff:.2f}s") + await asyncio.sleep(backoff) + last_exc = e + continue + logger.error(f"TMDB API error for {endpoint}: {status} - {e.response.text[:200]}") + raise + except httpx.RequestError as e: + attempts += 1 + backoff = (2 ** (attempts - 1)) + random.uniform(0, 0.25) + logger.warning(f"TMDB request error for {endpoint}: {e}; retry {attempts}/3 in {backoff:.2f}s") + await asyncio.sleep(backoff) + last_exc = e + continue + + # Exhausted retries + if last_exc: + raise last_exc + return {} @alru_cache(maxsize=2000) async def find_by_imdb_id(self, imdb_id: str) -> tuple[int | None, str | None]: @@ -107,13 +139,13 @@ async def find_by_imdb_id(self, imdb_id: str) -> tuple[int | None, str | None]: @alru_cache(maxsize=5000) async def get_movie_details(self, movie_id: int) -> dict: """Get details of a specific movie with credits and external IDs.""" - params = {"append_to_response": "credits,external_ids"} + params = {"append_to_response": "credits,external_ids,keywords"} return await self._make_request(f"/movie/{movie_id}", params=params) @alru_cache(maxsize=5000) async def get_tv_details(self, tv_id: int) -> dict: """Get details of a specific TV series with credits and external IDs.""" - params = {"append_to_response": "credits,external_ids"} + params = {"append_to_response": "credits,external_ids,keywords"} return await self._make_request(f"/tv/{tv_id}", params=params) @alru_cache(maxsize=1000) @@ -137,12 +169,14 @@ async def get_discover( with_genres: str | None = None, sort_by: str = "popularity.desc", page: int = 1, + **kwargs, ) -> dict: """Get discover content based on params.""" media_type = "movie" if media_type == "movie" else "tv" params = {"page": page, "sort_by": sort_by} if with_genres: params["with_genres"] = with_genres - + if kwargs: + params.update(kwargs) endpoint = f"/discover/{media_type}" return await self._make_request(endpoint, params=params) diff --git a/app/services/token_store.py b/app/services/token_store.py index 40f7007..304a6be 100644 --- a/app/services/token_store.py +++ b/app/services/token_store.py @@ -1,6 +1,4 @@ import base64 -import hashlib -import hmac import json from collections.abc import AsyncIterator from typing import Any @@ -8,25 +6,28 @@ import redis.asyncio as redis from cachetools import TTLCache from cryptography.fernet import Fernet, InvalidToken +from cryptography.hazmat.primitives import hashes +from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC from loguru import logger from app.core.config import settings +from app.core.security import redact_token class TokenStore: """Redis-backed store for user credentials and auth tokens.""" - KEY_PREFIX = "watchly:token:" + KEY_PREFIX = settings.REDIS_TOKEN_KEY def __init__(self) -> None: self._client: redis.Redis | None = None - self._cipher: Fernet | None = None # Cache decrypted payloads for 1 day (86400s) to reduce Redis hits # Max size 5000 allows many active users without eviction self._payload_cache: TTLCache = TTLCache(maxsize=5000, ttl=86400) if not settings.REDIS_URL: logger.warning("REDIS_URL is not set. 
Token storage will fail until a Redis instance is configured.") + if not settings.TOKEN_SALT or settings.TOKEN_SALT == "change-me": logger.warning( "TOKEN_SALT is missing or using the default placeholder. Set a strong value to secure tokens." @@ -40,114 +41,108 @@ def _ensure_secure_salt(self) -> None: ) def _get_cipher(self) -> Fernet: - """Get or create Fernet cipher instance based on TOKEN_SALT.""" - if self._cipher is None: - # Derive a 32-byte key from TOKEN_SALT using SHA256, then URL-safe base64 encode it - # This ensures we always have a valid Fernet key regardless of the salt's format - key_bytes = hashlib.sha256(settings.TOKEN_SALT.encode()).digest() - fernet_key = base64.urlsafe_b64encode(key_bytes) - self._cipher = Fernet(fernet_key) - return self._cipher + salt = b"x7FDf9kypzQ1LmR32b8hWv49sKq2Pd8T" + kdf = PBKDF2HMAC( + algorithm=hashes.SHA256(), + length=32, + salt=salt, + iterations=200_000, + ) + + key = base64.urlsafe_b64encode(kdf.derive(settings.TOKEN_SALT.encode("utf-8"))) + return Fernet(key) + + def encrypt_token(self, token: str) -> str: + cipher = self._get_cipher() + return cipher.encrypt(token.encode("utf-8")).decode("utf-8") + + def decrypt_token(self, enc: str) -> str: + cipher = self._get_cipher() + return cipher.decrypt(enc.encode("utf-8")).decode("utf-8") async def _get_client(self) -> redis.Redis: if self._client is None: - self._client = redis.from_url(settings.REDIS_URL, decode_responses=True, encoding="utf-8") + # Add socket timeouts to avoid hanging on Redis operations + self._client = redis.from_url( + settings.REDIS_URL, + decode_responses=True, + encoding="utf-8", + socket_connect_timeout=5, + socket_timeout=5, + ) return self._client - def _hash_token(self, token: str) -> str: - secret = settings.TOKEN_SALT.encode("utf-8") - return hmac.new(secret, msg=token.encode("utf-8"), digestmod=hashlib.sha256).hexdigest() - - def _format_key(self, hashed_token: str) -> str: - return f"{self.KEY_PREFIX}{hashed_token}" - - def _normalize_payload(self, payload: dict[str, Any]) -> dict[str, Any]: - return { - "username": (payload.get("username") or "").strip() or None, - "password": payload.get("password") or None, - "authKey": (payload.get("authKey") or "").strip() or None, - "includeWatched": bool(payload.get("includeWatched", False)), - } - - def _derive_token_value(self, payload: dict[str, Any]) -> str: - canonical = { - "username": payload.get("username") or "", - "password": payload.get("password") or "", - "authKey": payload.get("authKey") or "", - "includeWatched": bool(payload.get("includeWatched", False)), - } - serialized = json.dumps(canonical, sort_keys=True, separators=(",", ":")) - secret = settings.TOKEN_SALT.encode("utf-8") - return hmac.new(secret, serialized.encode("utf-8"), hashlib.sha256).hexdigest() - - async def store_payload(self, payload: dict[str, Any]) -> tuple[str, bool]: + def _format_key(self, token: str) -> str: + """Format Redis key from token.""" + return f"{self.KEY_PREFIX}{token}" + + def get_token_from_user_id(self, user_id: str) -> str: + return user_id.strip() + + def get_user_id_from_token(self, token: str) -> str: + return token.strip() if token else "" + + async def store_user_data(self, user_id: str, payload: dict[str, Any]) -> str: self._ensure_secure_salt() - normalized = self._normalize_payload(payload) - token = self._derive_token_value(normalized) - hashed = self._hash_token(token) - key = self._format_key(hashed) + token = self.get_token_from_user_id(user_id) + key = self._format_key(token) + + # Prepare data for 
storage (Plain JSON, no encryption needed) + storage_data = payload.copy() - # JSON Encode -> Encrypt -> Store - json_str = json.dumps(normalized) - encrypted_value = self._get_cipher().encrypt(json_str.encode()).decode("utf-8") + # Store user_id in payload for convenience + storage_data["user_id"] = user_id + + if storage_data.get("authKey"): + storage_data["authKey"] = self.encrypt_token(storage_data["authKey"]) client = await self._get_client() - existing = await client.exists(key) + json_str = json.dumps(storage_data) if settings.TOKEN_TTL_SECONDS and settings.TOKEN_TTL_SECONDS > 0: - await client.setex(key, settings.TOKEN_TTL_SECONDS, encrypted_value) - logger.info( - f"Stored encrypted credential payload with TTL {settings.TOKEN_TTL_SECONDS} seconds", - ) + await client.setex(key, settings.TOKEN_TTL_SECONDS, json_str) else: - await client.set(key, encrypted_value) - logger.info("Stored encrypted credential payload without expiration") + await client.set(key, json_str) - # Cache the new payload immediately to avoid next-read hit - self._payload_cache[token] = normalized + # Update cache with the payload + self._payload_cache[token] = payload - return token, not bool(existing) + return token - async def get_payload(self, token: str) -> dict[str, Any] | None: - # Check local LRU cache first + async def get_user_data(self, token: str) -> dict[str, Any] | None: if token in self._payload_cache: return self._payload_cache[token] - hashed = self._hash_token(token) - key = self._format_key(hashed) + key = self._format_key(token) client = await self._get_client() - encrypted_raw = await client.get(key) + data_raw = await client.get(key) - if encrypted_raw is None: + if not data_raw: return None try: - # Decrypt -> JSON Decode - decrypted_json = self._get_cipher().decrypt(encrypted_raw.encode()).decode("utf-8") - payload = json.loads(decrypted_json) - - # Cache for subsequent reads - self._payload_cache[token] = payload - return payload - except (InvalidToken, json.JSONDecodeError, UnicodeDecodeError): - logger.warning("Failed to decrypt or decode cached payload for token. 
Key might have changed.") + data = json.loads(data_raw) + if data.get("authKey"): + data["authKey"] = self.decrypt_token(data["authKey"]) + self._payload_cache[token] = data + return data + except (json.JSONDecodeError, InvalidToken): return None async def delete_token(self, token: str = None, key: str = None) -> None: if not token and not key: raise ValueError("Either token or key must be provided") if token: - hashed = self._hash_token(token) - key = self._format_key(hashed) + key = self._format_key(token) + client = await self._get_client() await client.delete(key) # Invalidate local cache - if token in self._payload_cache: + if token and token in self._payload_cache: del self._payload_cache[token] async def iter_payloads(self) -> AsyncIterator[tuple[str, dict[str, Any]]]: - """Iterate over all stored payloads, yielding key and payload.""" try: client = await self._get_client() except (redis.RedisError, OSError) as exc: @@ -155,24 +150,22 @@ async def iter_payloads(self) -> AsyncIterator[tuple[str, dict[str, Any]]]: return pattern = f"{self.KEY_PREFIX}*" - cipher = self._get_cipher() try: async for key in client.scan_iter(match=pattern): try: - encrypted_raw = await client.get(key) + data_raw = await client.get(key) except (redis.RedisError, OSError) as exc: - logger.warning(f"Failed to fetch payload for {key}: {exc}") + logger.warning(f"Failed to fetch payload for {redact_token(key)}: {exc}") continue - if encrypted_raw is None: + if not data_raw: continue try: - decrypted_json = cipher.decrypt(encrypted_raw.encode()).decode("utf-8") - payload = json.loads(decrypted_json) - except (InvalidToken, json.JSONDecodeError, UnicodeDecodeError): - logger.warning(f"Failed to decrypt payload for key {key}. Skipping.") + payload = json.loads(data_raw) + except json.JSONDecodeError: + logger.warning(f"Failed to decode payload for key {redact_token(key)}. Skipping.") continue yield key, payload diff --git a/app/services/translation.py b/app/services/translation.py new file mode 100644 index 0000000..80deb9c --- /dev/null +++ b/app/services/translation.py @@ -0,0 +1,31 @@ +import asyncio + +from async_lru import alru_cache +from deep_translator import GoogleTranslator +from loguru import logger + + +class TranslationService: + @alru_cache(maxsize=1000, ttl=7 * 24 * 60 * 60) + async def translate(self, text: str, target_lang: str | None) -> str: + if not text or not target_lang: + return text + + # Normalize lang (e.g. en-US -> en) + lang = target_lang.split("-")[0].lower() + if lang == "en": + return text + + try: + loop = asyncio.get_running_loop() + + translated = await loop.run_in_executor( + None, lambda: GoogleTranslator(source="auto", target=lang).translate(text) + ) + return translated if translated else text + except Exception as e: + logger.warning(f"Translation failed for '{text}' to '{lang}': {e}") + return text + + +translation_service = TranslationService() diff --git a/app/services/user_profile.py b/app/services/user_profile.py new file mode 100644 index 0000000..2c0cbba --- /dev/null +++ b/app/services/user_profile.py @@ -0,0 +1,294 @@ +import asyncio +from collections import defaultdict + +from app.models.profile import UserTasteProfile +from app.models.scoring import ScoredItem +from app.services.tmdb_service import TMDBService + +# TODO: Make these weights dynamic based on user's preferences. 
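+# The six per-dimension weights below sum to 1.0. BASE_GENRE_WEIGHT is a separate
+# genre-only prior layered on top in calculate_similarity, so items that also hit
+# the user's top genres can score slightly above 1.0.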
+GENRES_WEIGHT = 0.3 +KEYWORDS_WEIGHT = 0.40 +CAST_WEIGHT = 0.1 +CREW_WEIGHT = 0.1 +YEAR_WEIGHT = 0.05 +COUNTRIES_WEIGHT = 0.05 +BASE_GENRE_WEIGHT = 0.15 + + +def emphasis(x: float) -> float: + """ + Non-linear boost for strong preferences. + """ + return x**1.25 + + +def safe_div(a, b): + return a / b if b else 0.0 + + +class UserProfileService: + """ + Service to build a User Taste Profile using Sparse Vectors. + + It converts user's watched/loved items into high-dimensional sparse vectors + based on metadata (genres, keywords, cast, crew) and aggregates them into + a single 'User Vector' representing their taste. + """ + + def __init__(self): + self.tmdb_service = TMDBService() + + async def build_user_profile( + self, + scored_items: list[ScoredItem], + content_type: str | None = None, + excluded_genres: list[int] | None = None, + ) -> UserTasteProfile: + """ + Aggregates multiple item vectors into a single User Taste Profile. + Optionally filters by content_type (movie/series) to build specific profiles. + """ + # Use internal dicts for aggregation first, then convert to Pydantic + profile_data = { + "genres": defaultdict(float), + "keywords": defaultdict(float), + "cast": defaultdict(float), + "crew": defaultdict(float), + "years": defaultdict(float), + "countries": defaultdict(float), + } + + async def _process(item): + # Filter by content type if specified + if content_type and item.item.type != content_type: + return None + + # Resolve ID + tmdb_id = await self._resolve_tmdb_id(item.item.id) + if not tmdb_id: + return None + + # Fetch full details including keywords and credits + meta = await self._fetch_full_metadata(tmdb_id, item.item.type) + if not meta: + return None + + # Vectorize this single item + item_vector = self._vectorize_item(meta) + + # Scale by Interest Score (0.0 - 1.0) + interest_weight = item.score / 100.0 + + return item_vector, interest_weight + + # Launch all item processing coroutines in parallel + tasks = [_process(item) for item in scored_items] + results = await asyncio.gather(*tasks) + + # Merge results sequentially to avoid interleaved writes + for res in results: + if res is None: + continue + item_vector, interest_weight = res + self._merge_vector(profile_data, item_vector, interest_weight, excluded_genres) + + # Convert to Pydantic Model + profile = UserTasteProfile( + genres={"values": dict(profile_data["genres"])}, + keywords={"values": dict(profile_data["keywords"])}, + cast={"values": dict(profile_data["cast"])}, + crew={"values": dict(profile_data["crew"])}, + years={"values": dict(profile_data["years"])}, + countries={"values": dict(profile_data["countries"])}, + ) + + # Normalize all vectors to 0-1 range + profile.normalize_all() + + return profile + + def calculate_similarity(self, profile: UserTasteProfile, item_meta: dict) -> float: + """ + Final improved similarity scoring function. + Uses normalized sparse matching + rarity boosting + non-linear emphasis. + """ + + item_vec = self._vectorize_item(item_meta) + + score = 0.0 + + # 1. GENRES + # Normalize so movies with many genres don't get excessive score. + for gid in item_vec["genres"]: + pref = profile.genres.values.get(gid, 0.0) + + if pref > 0: + s = emphasis(pref) + s = safe_div(s, len(item_vec["genres"])) + score += s * GENRES_WEIGHT + + # Soft prior bias (genre-only) + base_pref = profile.top_genres_normalized.get(gid, 0.0) + score += base_pref * BASE_GENRE_WEIGHT + + # 2. 
KEYWORDS + for kw in item_vec["keywords"]: + pref = profile.keywords.values.get(kw, 0.0) + + if pref > 0: + s = emphasis(pref) + s = safe_div(s, len(item_vec["keywords"])) + score += s * KEYWORDS_WEIGHT + + # 3. CAST + for cid in item_vec["cast"]: + pref = profile.cast.values.get(cid, 0.0) + + if pref > 0: + s = emphasis(pref) + s = safe_div(s, len(item_vec["cast"])) + score += s * CAST_WEIGHT + + # 4. CREW + for cr in item_vec["crew"]: + pref = profile.crew.values.get(cr, 0.0) + + if pref > 0: + s = emphasis(pref) + s = safe_div(s, len(item_vec["crew"])) + score += s * CREW_WEIGHT + + # 5. COUNTRIES + for c in item_vec["countries"]: + pref = profile.countries.values.get(c, 0.0) + + if pref > 0: + s = emphasis(pref) + s = safe_div(s, len(item_vec["countries"])) + score += s * COUNTRIES_WEIGHT + + # 6. YEAR/DECADE + # Reward matches on the user's preferred decades, with soft credit to adjacent decades. + item_year = item_vec.get("year") + if item_year is not None: + base_pref = profile.years.values.get(item_year, 0.0) + if base_pref > 0: + score += emphasis(base_pref) * YEAR_WEIGHT + else: + # Soft-match adjacent decades at half strength + prev_decade = item_year - 10 + next_decade = item_year + 10 + neighbor_pref = 0.0 + if prev_decade in profile.years.values: + neighbor_pref = max(neighbor_pref, profile.years.values.get(prev_decade, 0.0)) + if next_decade in profile.years.values: + neighbor_pref = max(neighbor_pref, profile.years.values.get(next_decade, 0.0)) + if neighbor_pref > 0: + score += emphasis(neighbor_pref) * (YEAR_WEIGHT * 0.5) + + return score + + def _vectorize_item(self, meta: dict) -> dict[str, list[int] | int | list[str] | None]: + """ + Converts raw TMDB metadata into a sparse vector format. + Returns lists of IDs or values. + """ + # extract keywords + keywords = meta.get("keywords", {}).get("keywords", []) + if not keywords: + keywords = meta.get("keywords", {}).get("results", []) + + # extract countries (origin_country is list of strings like ["US", "GB"]) + # In details response, it might be production_countries list of dicts + countries = [] + if "production_countries" in meta: + countries = [c.get("iso_3166_1") for c in meta.get("production_countries", []) if c.get("iso_3166_1")] + elif "origin_country" in meta: + countries = meta.get("origin_country", []) + + vector = { + "genres": [g["id"] for g in meta.get("genres", [])], + "keywords": [k["id"] for k in keywords], + "cast": [], + "crew": [], + "year": None, + "countries": countries, + } + + # Cast (Top 3 only to reduce noise) + cast = meta.get("credits", {}).get("cast", []) + if not cast: + pass + + vector["cast"] = [c["id"] for c in cast[:3]] + + # Crew (Directors only) + crew = meta.get("credits", {}).get("crew", []) + vector["crew"] = [c["id"] for c in crew if c["job"] == "Director"] + + # Year Bucket (Decades: 2010, 2020, etc.) 
+ date_str = meta.get("release_date") or meta.get("first_air_date") + if date_str: + try: + year = int(date_str[:4]) + vector["year"] = (year // 10) * 10 + except (ValueError, TypeError): + pass + + return vector + + def _merge_vector( + self, + profile: dict, + item_vector: dict, + weight: float, + excluded_genres: list[int] | None = None, + ): + """Merges an item's sparse vector into the main profile with a weight.""" + + # Weights for specific dimensions (Feature Importance) + DIM_WEIGHTS = { + "genres": GENRES_WEIGHT, + "keywords": KEYWORDS_WEIGHT, + "cast": CAST_WEIGHT, + "crew": CREW_WEIGHT, + "year": YEAR_WEIGHT, + "countries": COUNTRIES_WEIGHT, + } + + for dim, ids in item_vector.items(): + dim_weight = DIM_WEIGHTS.get(dim, 1.0) + final_weight = weight * dim_weight + + if dim == "year": + if ids is not None: # ids is a single int for year + profile["years"][ids] += final_weight + elif ids: + for feature_id in ids: + if dim == "genres" and excluded_genres and feature_id in excluded_genres: + continue + profile[dim][feature_id] += final_weight + + async def _fetch_full_metadata(self, tmdb_id: int, type_: str) -> dict | None: + """Helper to fetch deep metadata.""" + try: + if type_ == "movie": + return await self.tmdb_service.get_movie_details(tmdb_id) + else: + return await self.tmdb_service.get_tv_details(tmdb_id) + except Exception: + return None + + async def _resolve_tmdb_id(self, stremio_id: str) -> int | None: + """Resolve Stremio ID (tt... or tmdb:...) to TMDB ID.""" + if stremio_id.startswith("tmdb:"): + try: + return int(stremio_id.split(":")[1]) + except (ValueError, IndexError): + return None + + if stremio_id.startswith("tt"): + tmdb_id, _ = await self.tmdb_service.find_by_imdb_id(stremio_id) + return tmdb_id + + return None diff --git a/app/startup/migration.py b/app/startup/migration.py new file mode 100644 index 0000000..432df02 --- /dev/null +++ b/app/startup/migration.py @@ -0,0 +1,249 @@ +import base64 +import hashlib +import json +import traceback + +import httpx +import redis.asyncio as redis +from cryptography.fernet import Fernet +from cryptography.hazmat.primitives import hashes +from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC +from loguru import logger + +from app.core.config import settings + + +def decrypt_data(enc_json: str): + key_bytes = hashlib.sha256(settings.TOKEN_SALT.encode()).digest() + fernet_key = base64.urlsafe_b64encode(key_bytes) + cipher = Fernet(fernet_key) + if not isinstance(enc_json, str): + return {} + try: + decrypted = cipher.decrypt(enc_json.encode()).decode() + except Exception as exc: + logger.warning(f"Failed to decrypt data: {exc}") + raise exc + return json.loads(decrypted) + + +async def get_auth_key(username: str, password: str): + url = "https://api.strem.io/api/login" + payload = { + "email": username, + "password": password, + "type": "Login", + "facebook": False, + } + async with httpx.AsyncClient(timeout=10.0) as client: + result = await client.post(url, json=payload) + result.raise_for_status() + data = result.json() + auth_key = data.get("result", {}).get("authKey", "") + return auth_key + + +async def get_user_info(auth_key): + url = "https://api.strem.io/api/getUser" + payload = { + "type": "GetUser", + "authKey": auth_key, + } + async with httpx.AsyncClient(timeout=10.0) as client: + response = await client.post(url, json=payload) + response.raise_for_status() + data = response.json() + result = data.get("result", {}) + email = result.get("email") + user_id = result.get("_id") + return email, user_id + 
+ +async def get_addons(auth_key: str): + url = "https://api.strem.io/api/addonCollectionGet" + payload = { + "type": "AddonCollectionGet", + "authKey": auth_key, + "update": True, + } + async with httpx.AsyncClient(timeout=10.0) as client: + result = await client.post(url, json=payload) + result.raise_for_status() + data = result.json() + error_payload = data.get("error") + if not error_payload and (data.get("code") and data.get("message")): + error_payload = data + + if error_payload: + message = "Invalid Stremio auth key." + if isinstance(error_payload, dict): + message = error_payload.get("message") or message + elif isinstance(error_payload, str): + message = error_payload or message + logger.warning(f"Addon collection request failed: {error_payload}") + raise ValueError(f"Stremio: {message}") + addons = data.get("result", {}).get("addons", []) + logger.info(f"Found {len(addons)} addons") + return addons + + +async def update_addon_url(auth_key: str, user_id: str): + addons = await get_addons(auth_key) + hostname = settings.HOST_NAME if settings.HOST_NAME.startswith("https") else f"https://{settings.HOST_NAME}" + for addon in addons: + if addon.get("manifest", {}).get("id") == settings.ADDON_ID: + addon["transportUrl"] = f"{hostname}/{user_id}/manifest.json" + + url = "https://api.strem.io/api/addonCollectionSet" + payload = { + "type": "AddonCollectionSet", + "authKey": auth_key, + "addons": addons, + } + + async with httpx.AsyncClient(timeout=10.0) as client: + result = await client.post(url, json=payload) + result.raise_for_status() + logger.info("Updated addon url") + return result.json().get("result", {}).get("success", False) + + +async def decode_old_payloads(encrypted_raw: str): + key_bytes = hashlib.sha256(settings.TOKEN_SALT.encode()).digest() + fernet_key = base64.urlsafe_b64encode(key_bytes) + cipher = Fernet(fernet_key) + decrypted_json = cipher.decrypt(encrypted_raw.encode()).decode("utf-8") + payload = json.loads(decrypted_json) + return payload + + +def encrypt_auth_key(auth_key): + salt = b"x7FDf9kypzQ1LmR32b8hWv49sKq2Pd8T" + kdf = PBKDF2HMAC( + algorithm=hashes.SHA256(), + length=32, + salt=salt, + iterations=200_000, + ) + + key = base64.urlsafe_b64encode(kdf.derive(settings.TOKEN_SALT.encode("utf-8"))) + client = Fernet(key) + return client.encrypt(auth_key.encode("utf-8")).decode("utf-8") + + +def prepare_default_payload(email, user_id): + return { + "email": email, + "user_id": user_id, + "settings": { + "catalogs": [ + {"id": "watchly.rec", "name": "Recommended", "enabled": True}, + {"id": "watchly.loved", "name": "Because You Loved", "enabled": True}, + {"id": "watchly.watched", "name": "Because You Watched", "enabled": True}, + {"id": "watchly.theme", "name": "Genre & Theme Collections", "enabled": True}, + ], + "language": "en", + "rpdb_key": "", + "excluded_movie_genres": [], + "excluded_series_genres": [], + }, + } + + +async def store_payload(client: redis.Redis, email: str, user_id: str, auth_key: str): + payload = prepare_default_payload(email, user_id) + logger.info(f"Storing payload for {user_id}: {payload}") + try: + # encrypt auth_key + if auth_key: + payload["authKey"] = encrypt_auth_key(auth_key) + key = user_id.strip() + await client.set(key, json.dumps(payload)) + except (redis.RedisError, OSError) as exc: + logger.warning(f"Failed to store payload for {key}: {exc}") + + +async def process_migration_key(redis_client: redis.Redis, key: str) -> bool: + try: + try: + data_raw = await redis_client.get(key) + except (redis.RedisError, OSError) as exc: 
+
+async def process_migration_key(redis_client: redis.Redis, key: str) -> bool:
+    try:
+        try:
+            data_raw = await redis_client.get(key)
+        except (redis.RedisError, OSError) as exc:
+            logger.warning(f"Failed to fetch payload for {key}: {exc}")
+            return False
+
+        if not data_raw:
+            logger.warning(f"Failed to fetch payload for {key}: Empty data")
+            return False
+
+        try:
+            payload = await decode_old_payloads(data_raw)
+        except Exception as exc:  # covers Fernet InvalidToken as well as json.JSONDecodeError
+            logger.warning(f"Failed to decode payload for key {key}: {exc}")
+            return False
+
+        if payload.get("username") and payload.get("password"):
+            auth_key = await get_auth_key(payload["username"], payload["password"])
+        elif payload.get("authKey"):
+            auth_key = payload.get("authKey")
+        else:
+            logger.warning(f"Failed to migrate {key}: no credentials or auth key in payload")
+            await redis_client.delete(key)
+            return False
+
+        email, user_id = await get_user_info(auth_key)
+        if not email or not user_id:
+            logger.warning(f"Failed to migrate {key}: could not resolve user info")
+            await redis_client.delete(key)
+            return False
+
+        new_payload = prepare_default_payload(email, user_id)
+        if auth_key:
+            new_payload["authKey"] = encrypt_auth_key(auth_key)
+
+        new_key = user_id.strip()
+        payload_json = json.dumps(new_payload)
+
+        if settings.TOKEN_TTL_SECONDS and settings.TOKEN_TTL_SECONDS > 0:
+            set_success = await redis_client.set(new_key, payload_json, ex=settings.TOKEN_TTL_SECONDS, nx=True)
+            if set_success:
+                logger.info(
+                    f"Stored encrypted credential payload with TTL {settings.TOKEN_TTL_SECONDS} seconds (SET NX)"
+                )
+        else:
+            set_success = await redis_client.setnx(new_key, payload_json)
+            if set_success:
+                logger.info("Stored encrypted credential payload without expiration (SETNX)")
+
+        if not set_success:
+            logger.info(f"Credential payload for {new_key} already exists, not overwriting.")
+
+        await redis_client.delete(key)
+        logger.info(f"Migrated {key} to {new_key}")
+        return True
+
+    except Exception as exc:
+        await redis_client.delete(key)
+        traceback.print_exc()
+        logger.warning(f"Failed to migrate {key}: {exc}")
+        return False
+
+
+async def migrate_tokens():
+    total_tokens = 0
+    failed_tokens = 0
+    success_tokens = 0
+    try:
+        redis_client = redis.from_url(settings.REDIS_URL, decode_responses=True, encoding="utf-8")
+    except (redis.RedisError, OSError) as exc:
+        logger.warning(f"Failed to connect to Redis: {exc}")
+        return
+
+    pattern = f"{settings.REDIS_TOKEN_KEY}*"
+    async for key in redis_client.scan_iter(match=pattern):
+        total_tokens += 1
+        if await process_migration_key(redis_client, key):
+            success_tokens += 1
+        else:
+            failed_tokens += 1
+
+    logger.info(f"[STATS] Total: {total_tokens}, Failed: {failed_tokens}, Success: {success_tokens}")
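migrate_tokens scans every legacy REDIS_TOKEN_KEY-prefixed entry and rewrites it under the bare user id. A minimal sketch of how it might be invoked as a one-off job; the entrypoint below is an assumption, not part of this diff:

import asyncio

if __name__ == "__main__":
    # Run the migration once. Re-running is safe: existing new-style keys are
    # not overwritten, since the code above stores with SET ... NX / SETNX.
    asyncio.run(migrate_tokens())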
diff --git a/app/static/index.html b/app/static/index.html
new file mode 100644
index 0000000..96b9773
--- /dev/null
+++ b/app/static/index.html
@@ -0,0 +1,680 @@
[New app/static/index.html (680 lines of markup). Recoverable content: a sidebar layout with the Watchly logo, a version badge, and step navigation; a hero titled "Personalized Recommendation Engine for Stremio" with the tagline "Discover movies and series tailored to your unique taste, powered by your Stremio library and watch history."; and six feature cards: Smart Recommendations (AI-powered suggestions based on your watch history), Custom Catalogs (organize with customizable names and order), Genre Filtering (exclude genres you don't like), Multi-Language (recommendations in your preferred language), RPDB Integration (enhanced posters with ratings), and Based on Your Loves (recommendations from content you loved).]
diff --git a/static/logo.png b/app/static/logo.png
similarity index 100%
rename from static/logo.png
rename to app/static/logo.png
diff --git a/app/static/script.js b/app/static/script.js
new file mode 100644
index 0000000..c4066c9
--- /dev/null
+++ b/app/static/script.js
@@ -0,0 +1,941 @@
+// Default catalog configurations
+const defaultCatalogs = [
+    { id: 'watchly.rec', name: 'Top Picks for You', enabled: true, description: 'Personalized recommendations based on your library' },
+    { id: 'watchly.loved', name: 'More Like', enabled: true, description: 'Recommendations similar to content you explicitly loved' },
+    { id: 'watchly.watched', name: 'Because You Watched', enabled: true, description: 'Recommendations based on your recent watch history' },
+    { id: 'watchly.theme', name: 'Genre & Keyword Catalogs', enabled: true, description: 'Dynamic catalogs based on your favorite genres, keywords, countries and more, just like Netflix. Examples: American Horror, Based on Novel or Book.' },
+];
+
+// Working copy of the catalog config; deep-cloned so the defaults stay pristine
+let catalogs = JSON.parse(JSON.stringify(defaultCatalogs));
+
+// Genre constants (TMDB genre ids)
+const MOVIE_GENRES = [
+    { id: '28', name: 'Action' }, { id: '12', name: 'Adventure' }, { id: '16', name: 'Animation' }, { id: '35', name: 'Comedy' }, { id: '80', name: 'Crime' }, { id: '99', name: 'Documentary' }, { id: '18', name: 'Drama' }, { id: '10751', name: 'Family' }, { id: '14', name: 'Fantasy' }, { id: '36', name: 'History' }, { id: '27', name: 'Horror' }, { id: '10402', name: 'Music' }, { id: '9648', name: 'Mystery' }, { id: '10749', name: 'Romance' }, { id: '878', name: 'Science Fiction' }, { id: '10770', name: 'TV Movie' }, { id: '53', name: 'Thriller' }, { id: '10752', name: 'War' }, { id: '37', name: 'Western' }
+];
+
+const SERIES_GENRES = [
+    { id: '10759', name: 'Action & Adventure' }, { id: '16', name: 'Animation' }, { id: '35', name: 'Comedy' }, { id: '80', name: 'Crime' }, { id: '99', name: 'Documentary' }, { id: '18', name: 'Drama' }, { id: '10751', name: 'Family' }, { id: '10762', name: 'Kids' }, { id: '9648', name: 'Mystery' }, { id: '10763', name: 'News' }, { id: '10764', name: 'Reality' }, { id: '10765', name: 'Sci-Fi & Fantasy' }, { id: '10766', name: 'Soap' }, { id: '10767', name: 'Talk' }, { id: '10768', name: 'War & Politics' }, { id: '37', name: 'Western' }
+];
+
+// DOM Elements
+const configForm = document.getElementById('configForm');
+const catalogList = document.getElementById('catalogList');
+const movieGenreList = document.getElementById('movieGenreList');
+const seriesGenreList = document.getElementById('seriesGenreList');
+const errorMessage = document.getElementById('errorMessage');
+const submitBtn = document.getElementById('submitBtn');
+const stremioLoginBtn = document.getElementById('stremioLoginBtn');
+const stremioLoginText = document.getElementById('stremioLoginText');
+const languageSelect = document.getElementById('languageSelect');
+const configNextBtn = document.getElementById('configNextBtn');
+const catalogsNextBtn = document.getElementById('catalogsNextBtn');
+const successResetBtn = document.getElementById('successResetBtn');
+const deleteAccountBtn = document.getElementById('deleteAccountBtn');
+
+const navItems = {
+    welcome: document.getElementById('nav-welcome'),
+    login: document.getElementById('nav-login'),
+    config: document.getElementById('nav-config'),
+    catalogs: document.getElementById('nav-catalogs'),
+    install: document.getElementById('nav-install')
+};
+
+const sections = {
+    welcome: document.getElementById('sect-welcome'),
+    login: document.getElementById('sect-login'),
+    config: document.getElementById('sect-config'),
+    catalogs: document.getElementById('sect-catalogs'),
+    install: document.getElementById('sect-install'),
+    success: document.getElementById('sect-success')
+};
+
+// Welcome Elements
+const btnGetStarted = document.getElementById('btn-get-started');
+
+
+// Initialize
+document.addEventListener('DOMContentLoaded', () => {
+    // Start at Welcome
+    switchSection('welcome');
+    initializeWelcomeFlow();
+
+    initializeNavigation();
+    initializeCatalogList();
+    initializeLanguageSelect();
+    initializeGenreLists();
+    initializeFormSubmission();
+    initializeSuccessActions();
+    initializeStremioLogin();
+    initializeFooter();
+    initializeKofi();
+    initializeAnnouncement();
+
+    // Next buttons
+    if (configNextBtn) configNextBtn.addEventListener('click', () => switchSection('catalogs'));
+    if (catalogsNextBtn) catalogsNextBtn.addEventListener('click', () => switchSection('install'));
+
+    // Reset buttons
+    document.getElementById('resetBtn')?.addEventListener('click', resetApp);
+    if (successResetBtn) successResetBtn.addEventListener('click', resetApp);
+});
+
+
+// Welcome Flow Logic
+function initializeWelcomeFlow() {
+    // A single "Get Started" button leads to the Stremio login step
+    if (btnGetStarted) {
+        btnGetStarted.addEventListener('click', () => {
+            navItems.login.classList.remove('disabled');
+            switchSection('login');
+        });
+    }
+}
+
+
+// Navigation Logic
+function initializeNavigation() {
+    Object.keys(navItems).forEach(key => {
+        navItems[key].addEventListener('click', () => {
+            if (!navItems[key].classList.contains('disabled')) {
+                switchSection(key);
+            }
+        });
+    });
+}
+
+function unlockNavigation() {
+    Object.values(navItems).forEach(el => el.classList.remove('disabled'));
+}
+
+function switchSection(sectionKey) {
+    // Hide all sections
+    Object.values(sections).forEach(el => {
+        if (el) el.classList.add('hidden');
+    });
+
+    // Show target section
+    if (sections[sectionKey]) {
+        sections[sectionKey].classList.remove('hidden');
+    }
+
+    // Reset all nav items, then activate the current one (if it exists in the nav)
+    Object.values(navItems).forEach(el => el.classList.remove('active', 'bg-blue-600/10', 'text-blue-400', 'border-l-2', 'border-blue-400'));
+    if (navItems[sectionKey]) {
+        navItems[sectionKey].classList.add('active');
+    }
+}
+
+
+function resetApp() {
+    if (configForm) configForm.reset();
+    clearErrors();
+
+    // Back to Welcome
+    switchSection('welcome');
+
+    // Lock nav steps again; login stays enabled, and the welcome view leaves
+    // the sidebar visible but inactive, so the click handler covers the rest.
+    Object.keys(navItems).forEach(key => {
+        if (key !== 'login') navItems[key].classList.add('disabled');
+    });
+
+    // Reset Stremio state
+    setStremioLoggedOutState();
+
+    // Reset catalogs
+    catalogs = JSON.parse(JSON.stringify(defaultCatalogs));
+    renderCatalogList();
+
+    // Show the form again
+    if (configForm) configForm.classList.remove('hidden');
+    if (sections.success) sections.success.classList.add('hidden');
+}
+
+
+// Stremio Login Logic
+async function initializeStremioLogin() {
+    const urlParams = new URLSearchParams(window.location.search);
+    const authKey = urlParams.get('key') || urlParams.get('authKey');
+
+    if (authKey) {
+        // Logged in -> unlock navigation and move to config
+        setStremioLoggedInState(authKey);
+
+        try {
+            await fetchStremioIdentity(authKey);
+            unlockNavigation();
+            switchSection('config');
+        } catch (error) {
+            showToast(error.message, "error");
+            resetApp();
+            return;
+        }
+
+        // Remove the auth query param from the address bar
+        const newUrl = window.location.protocol + "//" + window.location.host + window.location.pathname;
+        window.history.replaceState({ path: newUrl }, '', newUrl);
+    }
+
+    if (stremioLoginBtn) {
+        stremioLoginBtn.addEventListener('click', () => {
+            if (stremioLoginBtn.getAttribute('data-action') === 'logout') {
+                resetApp(); // Logout effectively resets the app flow
+            } else {
+                let appHost = window.APP_HOST;
+                if (!appHost || appHost.includes('
[Deleted: the old single-page static/index.html. Recoverable content: a hero "Your Stremio, Reimagined." with the blurb "Watchly analyzes your library to deliver personalized movie and TV show recommendations, powered by TMDB's advanced discovery engine."; feature bullets (Based on loved content, Hides watched items, Instant updates, Secure & Private); a "Configure Watchly" card with a version badge and the auth form; a success view ("You're all set!" / "Your personalized catalog is ready to install.") with install and copy buttons; and a "Private Token" warning: "This URL contains your private access token. Don't share it with others."]
- - - - - - - - diff --git a/static/script.js b/static/script.js deleted file mode 100644 index d6cc278..0000000 --- a/static/script.js +++ /dev/null @@ -1,191 +0,0 @@ -document.addEventListener('DOMContentLoaded', function () { - const form = document.getElementById('configForm'); - const usernameInput = document.getElementById('username'); - const passwordInput = document.getElementById('password'); - const authKeyInput = document.getElementById('authKey'); - const authMethodSelect = document.getElementById('authMethod'); - const credentialsFields = document.getElementById('credentialsFields'); - const authKeyFieldWrapper = document.getElementById('authKeyField'); - const submitBtn = document.getElementById('submitBtn'); - const errorMessage = document.getElementById('errorMessage'); - const successMessage = document.getElementById('successMessage'); - const addonUrlBox = document.getElementById('addonUrl'); - const copyBtn = document.getElementById('copyBtn'); - const installDesktopBtn = document.getElementById('installDesktopBtn'); - const installWebBtn = document.getElementById('installWebBtn'); - const resetBtn = document.getElementById('resetBtn'); - const btnText = submitBtn.querySelector('.btn-text'); - const btnLoader = submitBtn.querySelector('.loader'); - const toggleButtons = document.querySelectorAll('.toggle-btn'); - - // Store the raw URL string since div doesn't have .value - let generatedUrl = ''; - - function showError(message) { - errorMessage.textContent = message; - errorMessage.style.display = 'block'; - } - - function hideError() { - errorMessage.style.display = 'none'; - } - - function setLoading(loading) { - submitBtn.disabled = loading; - if (loading) { - btnText.classList.add('hidden'); - btnLoader.classList.remove('hidden'); - } else { - btnText.classList.remove('hidden'); - btnLoader.classList.add('hidden'); - } - } - - function updateMethodFields() { - const method = authMethodSelect.value; - if (method === 'credentials') { - credentialsFields.classList.remove('hidden'); - authKeyFieldWrapper.classList.add('hidden'); - usernameInput.required = true; - passwordInput.required = true; - authKeyInput.required = false; - } else { - credentialsFields.classList.add('hidden'); - authKeyFieldWrapper.classList.remove('hidden'); - usernameInput.required = false; - passwordInput.required = false; - authKeyInput.required = true; - } - } - - authMethodSelect.addEventListener('change', () => { - updateMethodFields(); - hideError(); - }); - - // Password/AuthKey Visibility Toggles - toggleButtons.forEach(btn => { - btn.addEventListener('click', (e) => { - e.preventDefault(); - const targetId = btn.dataset.target; - const input = document.getElementById(targetId); - if (input) { - const isPassword = input.type === 'password'; - input.type = isPassword ? 'text' : 'password'; - btn.textContent = isPassword ? 'Hide' : 'Show'; - } - }); - }); - - // Help Alert for Auth Key - const showAuthHelp = document.getElementById('showAuthHelp'); - if (showAuthHelp) { - showAuthHelp.addEventListener('click', (e) => { - e.preventDefault(); - alert('To find your Auth Key:\n1. Go to web.strem.io\n2. Open Console (F12)\n3. Type: JSON.parse(localStorage.getItem("profile")).auth.key\n4. 
Copy the result (without quotes)'); - }); - } - - form.addEventListener('submit', async function (e) { - e.preventDefault(); - hideError(); - - const method = authMethodSelect.value; - const username = usernameInput.value.trim(); - const password = passwordInput.value; - const authKey = authKeyInput.value.trim(); - const includeWatched = document.querySelector('input[name="recommendationSource"]:checked').value === 'watched'; - - // Client-side validation - if (method === 'credentials') { - if (!username || !password) { - showError('Please enter both email and password.'); - return; - } - } else if (!authKey) { - showError('Please provide your Stremio Auth Key.'); - return; - } - - setLoading(true); - - try { - const response = await fetch('/tokens/', { - method: 'POST', - headers: { 'Content-Type': 'application/json' }, - body: JSON.stringify({ - username: method === 'credentials' ? username : null, - password: method === 'credentials' ? password : null, - authKey: method === 'authkey' ? authKey : null, - includeWatched - }) - }); - - if (!response.ok) { - const errorData = await response.json().catch(() => ({ detail: 'Failed to create token.' })); - throw new Error(errorData.detail || 'Failed to connect. Check credentials.'); - } - - const data = await response.json(); - generatedUrl = data.manifestUrl; - addonUrlBox.textContent = generatedUrl; - - form.classList.add('hidden'); - successMessage.style.display = 'block'; - - } catch (error) { - console.error('Error:', error); - showError(error.message); - } finally { - setLoading(false); - } - }); - - installDesktopBtn.addEventListener('click', function () { - if (!generatedUrl) return; - const stremioUrl = `stremio://${generatedUrl.replace(/^https?:\/\//, '')}`; - window.location.href = stremioUrl; - }); - - installWebBtn.addEventListener('click', function () { - if (!generatedUrl) return; - const stremioUrl = `https://web.stremio.com/#/addons?addon=${encodeURIComponent(generatedUrl)}`; - window.open(stremioUrl, '_blank'); - }); - - copyBtn.addEventListener('click', async function () { - if (!generatedUrl) return; - - try { - await navigator.clipboard.writeText(generatedUrl); - const originalText = copyBtn.textContent; - copyBtn.textContent = 'Copied!'; - copyBtn.classList.add('btn-primary'); - copyBtn.classList.remove('btn-outline'); - - setTimeout(() => { - copyBtn.textContent = originalText; - copyBtn.classList.remove('btn-primary'); - copyBtn.classList.add('btn-outline'); - }, 2000); - } catch (err) { - console.error('Failed to copy:', err); - showError('Failed to copy to clipboard'); - } - }); - - resetBtn.addEventListener('click', function () { - form.reset(); - authMethodSelect.value = 'credentials'; - updateMethodFields(); - - form.classList.remove('hidden'); - successMessage.style.display = 'none'; - hideError(); - generatedUrl = ''; - addonUrlBox.textContent = ''; - }); - - // Initialize - updateMethodFields(); -}); diff --git a/static/style.css b/static/style.css deleted file mode 100644 index 85a38af..0000000 --- a/static/style.css +++ /dev/null @@ -1,503 +0,0 @@ -@import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap'); - -:root { - --primary: #0ea5e9; - --primary-hover: #0284c7; - --primary-light: rgba(14, 165, 233, 0.15); - --primary-glow: rgba(14, 165, 233, 0.4); - --bg-dark: #020617; - --bg-card: rgba(15, 23, 42, 0.6); - --text-main: #f8fafc; - --text-muted: #94a3b8; - --border: rgba(148, 163, 184, 0.1); - --gradient-start: #3b82f6; - --gradient-end: #06b6d4; - --success: 
#10b981; - --error: #ef4444; - --warning: #f59e0b; -} - -* { - margin: 0; - padding: 0; - box-sizing: border-box; -} - -body { - font-family: 'Inter', sans-serif; - background-color: var(--bg-dark); - background-image: - radial-gradient(circle at 0% 0%, rgba(59, 130, 246, 0.2) 0%, transparent 50%), - radial-gradient(circle at 100% 100%, rgba(6, 182, 212, 0.2) 0%, transparent 50%); - color: var(--text-main); - min-height: 100vh; - display: flex; - flex-direction: column; - align-items: center; - justify-content: center; - line-height: 1.6; -} - -.wrapper { - width: 100%; - max-width: 1200px; - padding: 2rem; - display: grid; - grid-template-columns: 1fr 1fr; - gap: 4rem; - align-items: center; -} - -@media (max-width: 968px) { - .wrapper { - grid-template-columns: 1fr; - gap: 2rem; - padding: 1rem; - } -} - -/* Hero Section */ -.hero { - padding-right: 2rem; -} - -.hero h1 { - font-size: 4.5rem; - font-weight: 800; - line-height: 1.1; - margin-bottom: 1.5rem; - background: linear-gradient(135deg, #fff 0%, #94a3b8 100%); - -webkit-background-clip: text; - -webkit-text-fill-color: transparent; - letter-spacing: -0.02em; -} - -.hero h1 .gradient-text { - background: linear-gradient(135deg, var(--gradient-start) 0%, var(--gradient-end) 100%); - -webkit-background-clip: text; - -webkit-text-fill-color: transparent; - filter: drop-shadow(0 0 20px rgba(59, 130, 246, 0.3)); -} - -.hero p { - font-size: 1.25rem; - color: var(--text-muted); - margin-bottom: 2.5rem; - max-width: 540px; -} - -.features { - display: grid; - grid-template-columns: repeat(auto-fit, minmax(200px, 1fr)); - gap: 1.5rem; -} - -.feature { - display: flex; - align-items: center; - gap: 0.75rem; - color: var(--text-muted); - font-size: 0.95rem; -} - -.feature-icon { - width: 32px; - height: 32px; - background: rgba(15, 23, 42, 0.8); - border: 1px solid var(--border); - color: var(--primary); - border-radius: 10px; - display: flex; - align-items: center; - justify-content: center; - box-shadow: 0 4px 12px rgba(0, 0, 0, 0.2); -} - -/* Config Card */ -.config-card { - background: var(--bg-card); - backdrop-filter: blur(24px); - -webkit-backdrop-filter: blur(24px); - border: 1px solid rgba(255, 255, 255, 0.05); - border-top: 1px solid rgba(255, 255, 255, 0.1); - border-radius: 24px; - padding: 2.5rem; - box-shadow: 0 25px 50px -12px rgba(0, 0, 0, 0.5), inset 0 0 0 1px rgba(255, 255, 255, 0.02); - width: 100%; - max-width: 500px; - margin: 0 auto; - position: relative; - overflow: hidden; -} - -/* Add a subtle shine effect */ -.config-card::before { - content: ''; - position: absolute; - top: 0; - left: 0; - right: 0; - height: 1px; - background: linear-gradient(90deg, transparent, rgba(255, 255, 255, 0.2), transparent); -} - -.card-header { - text-align: center; - margin-bottom: 2rem; -} - -.logo { - width: 72px; - height: 72px; - margin-bottom: 1.5rem; - filter: drop-shadow(0 0 25px rgba(59, 130, 246, 0.4)); - transition: transform 0.3s ease; -} - -.logo:hover { - transform: scale(1.05); -} - -/* Forms */ -.form-group { - margin-bottom: 1.5rem; -} - -.label { - display: block; - font-size: 0.875rem; - font-weight: 500; - color: var(--text-muted); - margin-bottom: 0.5rem; -} - -.input-wrapper { - position: relative; - display: flex; - align-items: center; -} - -.input-icon { - position: absolute; - left: 12px; - color: var(--text-muted); - pointer-events: none; - display: flex; - align-items: center; -} - -.input, .select { - width: 100%; - padding: 0.875rem 1rem; - background: rgba(15, 23, 42, 0.6); - border: 1px solid 
var(--border); - border-radius: 12px; - color: var(--text-main); - font-family: inherit; - font-size: 1rem; - transition: all 0.2s ease; -} - -.input.has-icon { - padding-left: 2.5rem; -} - -.input:focus, .select:focus { - outline: none; - border-color: var(--primary); - box-shadow: 0 0 0 2px var(--primary-light); - background: rgba(15, 23, 42, 0.8); -} - -.select { - appearance: none; - background-image: url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 20 20'%3e%3cpath stroke='%2394a3b8' stroke-linecap='round' stroke-linejoin='round' stroke-width='1.5' d='M6 8l4 4 4-4'/%3e%3c/svg%3e"); - background-position: right 0.75rem center; - background-repeat: no-repeat; - background-size: 1.25em 1.25em; - padding-right: 2.5rem; - cursor: pointer; -} - -/* Toggles */ -.toggle-btn { - position: absolute; - right: 0.75rem; - background: none; - border: none; - color: var(--primary); - font-size: 0.75rem; - font-weight: 600; - cursor: pointer; - padding: 0.25rem 0.5rem; - border-radius: 6px; - transition: all 0.2s; -} - -.toggle-btn:hover { - background: var(--primary-light); -} - -/* Radio Tiles */ -.radio-grid { - display: grid; - grid-template-columns: 1fr 1fr; - gap: 1rem; -} - -.radio-option { - position: relative; -} - -.radio-option input { - position: absolute; - opacity: 0; - cursor: pointer; - inset: 0; - z-index: 2; -} - -.radio-tile { - display: flex; - flex-direction: column; - align-items: center; - justify-content: center; - padding: 1.25rem; - background: rgba(15, 23, 42, 0.6); - border: 1px solid var(--border); - border-radius: 16px; - cursor: pointer; - transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1); - position: relative; - overflow: hidden; -} - -.radio-tile::before { - content: ''; - position: absolute; - inset: 0; - background: radial-gradient(circle at center, var(--primary-light), transparent 70%); - opacity: 0; - transition: opacity 0.3s; -} - -.radio-icon { - margin-bottom: 0.5rem; - color: var(--text-muted); - transition: all 0.3s; -} - -.radio-tile span { - font-size: 0.875rem; - font-weight: 500; - color: var(--text-muted); - transition: color 0.3s; -} - -.radio-option input:checked + .radio-tile { - border-color: var(--primary); - background: rgba(15, 23, 42, 0.9); - box-shadow: 0 0 0 1px var(--primary), 0 10px 25px -5px var(--primary-light); - transform: translateY(-2px); -} - -.radio-option input:checked + .radio-tile .radio-icon { - color: var(--primary); - transform: scale(1.1); -} - -.radio-option input:checked + .radio-tile span { - color: var(--text-main); -} - -.radio-option input:checked + .radio-tile::before { - opacity: 1; -} - -.radio-option:hover .radio-tile { - border-color: rgba(148, 163, 184, 0.3); -} - -/* Buttons */ -.btn { - width: 100%; - padding: 1rem; - border: none; - border-radius: 12px; - font-weight: 600; - font-size: 1rem; - cursor: pointer; - transition: all 0.3s cubic-bezier(0.4, 0, 0.2, 1); - display: flex; - align-items: center; - justify-content: center; - gap: 0.75rem; - position: relative; - overflow: hidden; -} - -.btn-primary { - background: linear-gradient(135deg, var(--gradient-start) 0%, var(--gradient-end) 100%); - color: white; - box-shadow: 0 4px 12px rgba(59, 130, 246, 0.3), inset 0 1px 0 rgba(255, 255, 255, 0.2); -} - -.btn-primary:hover { - transform: translateY(-2px); - box-shadow: 0 8px 24px rgba(59, 130, 246, 0.5), inset 0 1px 0 rgba(255, 255, 255, 0.2); -} - -.btn-primary:active { - transform: translateY(0); -} - -.btn-outline { - background: rgba(15, 23, 42, 0.4); - border: 1px 
solid var(--border); - color: var(--text-muted); -} - -.btn-outline:hover { - border-color: var(--primary); - color: var(--text-main); - background: rgba(14, 165, 233, 0.1); - box-shadow: 0 0 15px rgba(14, 165, 233, 0.1); -} - -/* Messages */ -.error-box { - background: rgba(239, 68, 68, 0.1); - border: 1px solid rgba(239, 68, 68, 0.2); - color: #fca5a5; - padding: 1rem; - border-radius: 12px; - margin-bottom: 1.5rem; - font-size: 0.875rem; - display: none; - animation: shake 0.4s cubic-bezier(.36,.07,.19,.97) both; -} - -@keyframes shake { - 10%, 90% { transform: translate3d(-1px, 0, 0); } - 20%, 80% { transform: translate3d(2px, 0, 0); } - 30%, 50%, 70% { transform: translate3d(-4px, 0, 0); } - 40%, 60% { transform: translate3d(4px, 0, 0); } -} - -.success-view { - text-align: center; - display: none; - animation: fadeIn 0.5s ease-out; -} - -@keyframes fadeIn { - from { opacity: 0; transform: translateY(10px); } - to { opacity: 1; transform: translateY(0); } -} - -.success-icon { - width: 80px; - height: 80px; - background: linear-gradient(135deg, rgba(16, 185, 129, 0.1), rgba(16, 185, 129, 0.05)); - color: var(--success); - border: 1px solid rgba(16, 185, 129, 0.2); - border-radius: 50%; - display: flex; - align-items: center; - justify-content: center; - margin: 0 auto 1.5rem; - box-shadow: 0 0 30px rgba(16, 185, 129, 0.2); -} - -/* Utils */ -.helper-text { - font-size: 0.75rem; - color: var(--text-muted); - margin-top: 0.5rem; -} - -.hidden { - display: none; -} - -.url-box { - background: rgba(2, 6, 23, 0.6); - padding: 1.25rem; - border-radius: 12px; - border: 1px solid var(--border); - font-family: 'JetBrains Mono', monospace; - color: #38bdf8; - word-break: break-all; - margin-bottom: 1.5rem; - font-size: 0.875rem; - position: relative; -} - -/* Warning Box */ -.warning-box { - background: rgba(245, 158, 11, 0.08); - border: 1px solid rgba(245, 158, 11, 0.2); - border-radius: 12px; - padding: 1rem; - margin-bottom: 1.5rem; - display: flex; - gap: 1rem; - align-items: start; - text-align: left; -} - -.warning-icon { - color: var(--warning); - flex-shrink: 0; -} - -.warning-content h4 { - color: #fbbf24; - font-size: 0.875rem; - margin-bottom: 0.25rem; - text-transform: uppercase; - letter-spacing: 0.05em; -} - -/* Animations */ -@keyframes spin { - to { transform: rotate(360deg); } -} - -.loader { - width: 20px; - height: 20px; - border: 2px solid rgba(255, 255, 255, 0.3); - border-top-color: white; - border-radius: 50%; - animation: spin 0.8s linear infinite; -} - -/* Version Badge */ -.version-badge { - display: inline-block; - background: rgba(14, 165, 233, 0.1); - border: 1px solid rgba(14, 165, 233, 0.3); - color: var(--primary); - padding: 0.25rem 0.75rem; - border-radius: 12px; - font-size: 0.75rem; - font-weight: 600; - margin-top: 0.5rem; - font-family: 'JetBrains Mono', monospace; - letter-spacing: 0.05em; -} - -/* Announcement */ -.announcement { - background: linear-gradient(to right, rgba(59, 130, 246, 0.1), rgba(6, 182, 212, 0.1)); - border: 1px solid rgba(59, 130, 246, 0.2); - padding: 0.75rem; - border-radius: 12px; - margin-bottom: 2rem; - font-size: 0.875rem; - color: var(--text-main); - display: flex; - align-items: center; - gap: 0.5rem; -} diff --git a/uv.lock b/uv.lock index 2daf6e2..3397e59 100644 --- a/uv.lock +++ b/uv.lock @@ -68,6 +68,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/fe/ba/e2081de779ca30d473f21f5b30e0e737c438205440784c7dfc81efc2b029/async_timeout-5.0.1-py3-none-any.whl", hash = 
"sha256:39e3809566ff85354557ec2398b55e096c8364bacac9405a7a1fa429e77fe76c", size = 6233, upload-time = "2024-11-06T16:41:37.9Z" }, ] +[[package]] +name = "beautifulsoup4" +version = "4.14.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "soupsieve" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c3/b0/1c6a16426d389813b48d95e26898aff79abbde42ad353958ad95cc8c9b21/beautifulsoup4-4.14.3.tar.gz", hash = "sha256:6292b1c5186d356bba669ef9f7f051757099565ad9ada5dd630bd9de5fa7fb86", size = 627737, upload-time = "2025-11-30T15:08:26.084Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1a/39/47f9197bdd44df24d67ac8893641e16f386c984a0619ef2ee4c51fbbc019/beautifulsoup4-4.14.3-py3-none-any.whl", hash = "sha256:0918bfe44902e6ad8d57732ba310582e98da931428d231a5ecb9e7c703a735bb", size = 107721, upload-time = "2025-11-30T15:08:24.087Z" }, +] + [[package]] name = "black" version = "25.11.0" @@ -216,6 +229,95 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/db/3c/33bac158f8ab7f89b2e59426d5fe2e4f63f7ed25df84c036890172b412b5/cfgv-3.5.0-py2.py3-none-any.whl", hash = "sha256:a8dc6b26ad22ff227d2634a65cb388215ce6cc96bbcc5cfde7641ae87e8dacc0", size = 7445, upload-time = "2025-11-19T20:55:50.744Z" }, ] +[[package]] +name = "charset-normalizer" +version = "3.4.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/13/69/33ddede1939fdd074bce5434295f38fae7136463422fe4fd3e0e89b98062/charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a", size = 129418, upload-time = "2025-10-14T04:42:32.879Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1f/b8/6d51fc1d52cbd52cd4ccedd5b5b2f0f6a11bbf6765c782298b0f3e808541/charset_normalizer-3.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e824f1492727fa856dd6eda4f7cee25f8518a12f3c4a56a74e8095695089cf6d", size = 209709, upload-time = "2025-10-14T04:40:11.385Z" }, + { url = "https://files.pythonhosted.org/packages/5c/af/1f9d7f7faafe2ddfb6f72a2e07a548a629c61ad510fe60f9630309908fef/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4bd5d4137d500351a30687c2d3971758aac9a19208fc110ccb9d7188fbe709e8", size = 148814, upload-time = "2025-10-14T04:40:13.135Z" }, + { url = "https://files.pythonhosted.org/packages/79/3d/f2e3ac2bbc056ca0c204298ea4e3d9db9b4afe437812638759db2c976b5f/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:027f6de494925c0ab2a55eab46ae5129951638a49a34d87f4c3eda90f696b4ad", size = 144467, upload-time = "2025-10-14T04:40:14.728Z" }, + { url = "https://files.pythonhosted.org/packages/ec/85/1bf997003815e60d57de7bd972c57dc6950446a3e4ccac43bc3070721856/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f820802628d2694cb7e56db99213f930856014862f3fd943d290ea8438d07ca8", size = 162280, upload-time = "2025-10-14T04:40:16.14Z" }, + { url = "https://files.pythonhosted.org/packages/3e/8e/6aa1952f56b192f54921c436b87f2aaf7c7a7c3d0d1a765547d64fd83c13/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:798d75d81754988d2565bff1b97ba5a44411867c0cf32b77a7e8f8d84796b10d", size = 159454, upload-time = "2025-10-14T04:40:17.567Z" }, + { url = 
"https://files.pythonhosted.org/packages/36/3b/60cbd1f8e93aa25d1c669c649b7a655b0b5fb4c571858910ea9332678558/charset_normalizer-3.4.4-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d1bb833febdff5c8927f922386db610b49db6e0d4f4ee29601d71e7c2694313", size = 153609, upload-time = "2025-10-14T04:40:19.08Z" }, + { url = "https://files.pythonhosted.org/packages/64/91/6a13396948b8fd3c4b4fd5bc74d045f5637d78c9675585e8e9fbe5636554/charset_normalizer-3.4.4-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:9cd98cdc06614a2f768d2b7286d66805f94c48cde050acdbbb7db2600ab3197e", size = 151849, upload-time = "2025-10-14T04:40:20.607Z" }, + { url = "https://files.pythonhosted.org/packages/b7/7a/59482e28b9981d105691e968c544cc0df3b7d6133152fb3dcdc8f135da7a/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:077fbb858e903c73f6c9db43374fd213b0b6a778106bc7032446a8e8b5b38b93", size = 151586, upload-time = "2025-10-14T04:40:21.719Z" }, + { url = "https://files.pythonhosted.org/packages/92/59/f64ef6a1c4bdd2baf892b04cd78792ed8684fbc48d4c2afe467d96b4df57/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:244bfb999c71b35de57821b8ea746b24e863398194a4014e4c76adc2bbdfeff0", size = 145290, upload-time = "2025-10-14T04:40:23.069Z" }, + { url = "https://files.pythonhosted.org/packages/6b/63/3bf9f279ddfa641ffa1962b0db6a57a9c294361cc2f5fcac997049a00e9c/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:64b55f9dce520635f018f907ff1b0df1fdc31f2795a922fb49dd14fbcdf48c84", size = 163663, upload-time = "2025-10-14T04:40:24.17Z" }, + { url = "https://files.pythonhosted.org/packages/ed/09/c9e38fc8fa9e0849b172b581fd9803bdf6e694041127933934184e19f8c3/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:faa3a41b2b66b6e50f84ae4a68c64fcd0c44355741c6374813a800cd6695db9e", size = 151964, upload-time = "2025-10-14T04:40:25.368Z" }, + { url = "https://files.pythonhosted.org/packages/d2/d1/d28b747e512d0da79d8b6a1ac18b7ab2ecfd81b2944c4c710e166d8dd09c/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:6515f3182dbe4ea06ced2d9e8666d97b46ef4c75e326b79bb624110f122551db", size = 161064, upload-time = "2025-10-14T04:40:26.806Z" }, + { url = "https://files.pythonhosted.org/packages/bb/9a/31d62b611d901c3b9e5500c36aab0ff5eb442043fb3a1c254200d3d397d9/charset_normalizer-3.4.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cc00f04ed596e9dc0da42ed17ac5e596c6ccba999ba6bd92b0e0aef2f170f2d6", size = 155015, upload-time = "2025-10-14T04:40:28.284Z" }, + { url = "https://files.pythonhosted.org/packages/1f/f3/107e008fa2bff0c8b9319584174418e5e5285fef32f79d8ee6a430d0039c/charset_normalizer-3.4.4-cp310-cp310-win32.whl", hash = "sha256:f34be2938726fc13801220747472850852fe6b1ea75869a048d6f896838c896f", size = 99792, upload-time = "2025-10-14T04:40:29.613Z" }, + { url = "https://files.pythonhosted.org/packages/eb/66/e396e8a408843337d7315bab30dbf106c38966f1819f123257f5520f8a96/charset_normalizer-3.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:a61900df84c667873b292c3de315a786dd8dac506704dea57bc957bd31e22c7d", size = 107198, upload-time = "2025-10-14T04:40:30.644Z" }, + { url = "https://files.pythonhosted.org/packages/b5/58/01b4f815bf0312704c267f2ccb6e5d42bcc7752340cd487bc9f8c3710597/charset_normalizer-3.4.4-cp310-cp310-win_arm64.whl", hash = "sha256:cead0978fc57397645f12578bfd2d5ea9138ea0fac82b2f63f7f7c6877986a69", size = 100262, upload-time = 
"2025-10-14T04:40:32.108Z" }, + { url = "https://files.pythonhosted.org/packages/ed/27/c6491ff4954e58a10f69ad90aca8a1b6fe9c5d3c6f380907af3c37435b59/charset_normalizer-3.4.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6e1fcf0720908f200cd21aa4e6750a48ff6ce4afe7ff5a79a90d5ed8a08296f8", size = 206988, upload-time = "2025-10-14T04:40:33.79Z" }, + { url = "https://files.pythonhosted.org/packages/94/59/2e87300fe67ab820b5428580a53cad894272dbb97f38a7a814a2a1ac1011/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5f819d5fe9234f9f82d75bdfa9aef3a3d72c4d24a6e57aeaebba32a704553aa0", size = 147324, upload-time = "2025-10-14T04:40:34.961Z" }, + { url = "https://files.pythonhosted.org/packages/07/fb/0cf61dc84b2b088391830f6274cb57c82e4da8bbc2efeac8c025edb88772/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:a59cb51917aa591b1c4e6a43c132f0cdc3c76dbad6155df4e28ee626cc77a0a3", size = 142742, upload-time = "2025-10-14T04:40:36.105Z" }, + { url = "https://files.pythonhosted.org/packages/62/8b/171935adf2312cd745d290ed93cf16cf0dfe320863ab7cbeeae1dcd6535f/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:8ef3c867360f88ac904fd3f5e1f902f13307af9052646963ee08ff4f131adafc", size = 160863, upload-time = "2025-10-14T04:40:37.188Z" }, + { url = "https://files.pythonhosted.org/packages/09/73/ad875b192bda14f2173bfc1bc9a55e009808484a4b256748d931b6948442/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d9e45d7faa48ee908174d8fe84854479ef838fc6a705c9315372eacbc2f02897", size = 157837, upload-time = "2025-10-14T04:40:38.435Z" }, + { url = "https://files.pythonhosted.org/packages/6d/fc/de9cce525b2c5b94b47c70a4b4fb19f871b24995c728e957ee68ab1671ea/charset_normalizer-3.4.4-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:840c25fb618a231545cbab0564a799f101b63b9901f2569faecd6b222ac72381", size = 151550, upload-time = "2025-10-14T04:40:40.053Z" }, + { url = "https://files.pythonhosted.org/packages/55/c2/43edd615fdfba8c6f2dfbd459b25a6b3b551f24ea21981e23fb768503ce1/charset_normalizer-3.4.4-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ca5862d5b3928c4940729dacc329aa9102900382fea192fc5e52eb69d6093815", size = 149162, upload-time = "2025-10-14T04:40:41.163Z" }, + { url = "https://files.pythonhosted.org/packages/03/86/bde4ad8b4d0e9429a4e82c1e8f5c659993a9a863ad62c7df05cf7b678d75/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d9c7f57c3d666a53421049053eaacdd14bbd0a528e2186fcb2e672effd053bb0", size = 150019, upload-time = "2025-10-14T04:40:42.276Z" }, + { url = "https://files.pythonhosted.org/packages/1f/86/a151eb2af293a7e7bac3a739b81072585ce36ccfb4493039f49f1d3cae8c/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:277e970e750505ed74c832b4bf75dac7476262ee2a013f5574dd49075879e161", size = 143310, upload-time = "2025-10-14T04:40:43.439Z" }, + { url = "https://files.pythonhosted.org/packages/b5/fe/43dae6144a7e07b87478fdfc4dbe9efd5defb0e7ec29f5f58a55aeef7bf7/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:31fd66405eaf47bb62e8cd575dc621c56c668f27d46a61d975a249930dd5e2a4", size = 162022, upload-time = "2025-10-14T04:40:44.547Z" }, + { url = 
"https://files.pythonhosted.org/packages/80/e6/7aab83774f5d2bca81f42ac58d04caf44f0cc2b65fc6db2b3b2e8a05f3b3/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:0d3d8f15c07f86e9ff82319b3d9ef6f4bf907608f53fe9d92b28ea9ae3d1fd89", size = 149383, upload-time = "2025-10-14T04:40:46.018Z" }, + { url = "https://files.pythonhosted.org/packages/4f/e8/b289173b4edae05c0dde07f69f8db476a0b511eac556dfe0d6bda3c43384/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:9f7fcd74d410a36883701fafa2482a6af2ff5ba96b9a620e9e0721e28ead5569", size = 159098, upload-time = "2025-10-14T04:40:47.081Z" }, + { url = "https://files.pythonhosted.org/packages/d8/df/fe699727754cae3f8478493c7f45f777b17c3ef0600e28abfec8619eb49c/charset_normalizer-3.4.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:ebf3e58c7ec8a8bed6d66a75d7fb37b55e5015b03ceae72a8e7c74495551e224", size = 152991, upload-time = "2025-10-14T04:40:48.246Z" }, + { url = "https://files.pythonhosted.org/packages/1a/86/584869fe4ddb6ffa3bd9f491b87a01568797fb9bd8933f557dba9771beaf/charset_normalizer-3.4.4-cp311-cp311-win32.whl", hash = "sha256:eecbc200c7fd5ddb9a7f16c7decb07b566c29fa2161a16cf67b8d068bd21690a", size = 99456, upload-time = "2025-10-14T04:40:49.376Z" }, + { url = "https://files.pythonhosted.org/packages/65/f6/62fdd5feb60530f50f7e38b4f6a1d5203f4d16ff4f9f0952962c044e919a/charset_normalizer-3.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:5ae497466c7901d54b639cf42d5b8c1b6a4fead55215500d2f486d34db48d016", size = 106978, upload-time = "2025-10-14T04:40:50.844Z" }, + { url = "https://files.pythonhosted.org/packages/7a/9d/0710916e6c82948b3be62d9d398cb4fcf4e97b56d6a6aeccd66c4b2f2bd5/charset_normalizer-3.4.4-cp311-cp311-win_arm64.whl", hash = "sha256:65e2befcd84bc6f37095f5961e68a6f077bf44946771354a28ad434c2cce0ae1", size = 99969, upload-time = "2025-10-14T04:40:52.272Z" }, + { url = "https://files.pythonhosted.org/packages/f3/85/1637cd4af66fa687396e757dec650f28025f2a2f5a5531a3208dc0ec43f2/charset_normalizer-3.4.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0a98e6759f854bd25a58a73fa88833fba3b7c491169f86ce1180c948ab3fd394", size = 208425, upload-time = "2025-10-14T04:40:53.353Z" }, + { url = "https://files.pythonhosted.org/packages/9d/6a/04130023fef2a0d9c62d0bae2649b69f7b7d8d24ea5536feef50551029df/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b5b290ccc2a263e8d185130284f8501e3e36c5e02750fc6b6bdeb2e9e96f1e25", size = 148162, upload-time = "2025-10-14T04:40:54.558Z" }, + { url = "https://files.pythonhosted.org/packages/78/29/62328d79aa60da22c9e0b9a66539feae06ca0f5a4171ac4f7dc285b83688/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74bb723680f9f7a6234dcf67aea57e708ec1fbdf5699fb91dfd6f511b0a320ef", size = 144558, upload-time = "2025-10-14T04:40:55.677Z" }, + { url = "https://files.pythonhosted.org/packages/86/bb/b32194a4bf15b88403537c2e120b817c61cd4ecffa9b6876e941c3ee38fe/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f1e34719c6ed0b92f418c7c780480b26b5d9c50349e9a9af7d76bf757530350d", size = 161497, upload-time = "2025-10-14T04:40:57.217Z" }, + { url = "https://files.pythonhosted.org/packages/19/89/a54c82b253d5b9b111dc74aca196ba5ccfcca8242d0fb64146d4d3183ff1/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = 
"sha256:2437418e20515acec67d86e12bf70056a33abdacb5cb1655042f6538d6b085a8", size = 159240, upload-time = "2025-10-14T04:40:58.358Z" }, + { url = "https://files.pythonhosted.org/packages/c0/10/d20b513afe03acc89ec33948320a5544d31f21b05368436d580dec4e234d/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11d694519d7f29d6cd09f6ac70028dba10f92f6cdd059096db198c283794ac86", size = 153471, upload-time = "2025-10-14T04:40:59.468Z" }, + { url = "https://files.pythonhosted.org/packages/61/fa/fbf177b55bdd727010f9c0a3c49eefa1d10f960e5f09d1d887bf93c2e698/charset_normalizer-3.4.4-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ac1c4a689edcc530fc9d9aa11f5774b9e2f33f9a0c6a57864e90908f5208d30a", size = 150864, upload-time = "2025-10-14T04:41:00.623Z" }, + { url = "https://files.pythonhosted.org/packages/05/12/9fbc6a4d39c0198adeebbde20b619790e9236557ca59fc40e0e3cebe6f40/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:21d142cc6c0ec30d2efee5068ca36c128a30b0f2c53c1c07bd78cb6bc1d3be5f", size = 150647, upload-time = "2025-10-14T04:41:01.754Z" }, + { url = "https://files.pythonhosted.org/packages/ad/1f/6a9a593d52e3e8c5d2b167daf8c6b968808efb57ef4c210acb907c365bc4/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:5dbe56a36425d26d6cfb40ce79c314a2e4dd6211d51d6d2191c00bed34f354cc", size = 145110, upload-time = "2025-10-14T04:41:03.231Z" }, + { url = "https://files.pythonhosted.org/packages/30/42/9a52c609e72471b0fc54386dc63c3781a387bb4fe61c20231a4ebcd58bdd/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5bfbb1b9acf3334612667b61bd3002196fe2a1eb4dd74d247e0f2a4d50ec9bbf", size = 162839, upload-time = "2025-10-14T04:41:04.715Z" }, + { url = "https://files.pythonhosted.org/packages/c4/5b/c0682bbf9f11597073052628ddd38344a3d673fda35a36773f7d19344b23/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:d055ec1e26e441f6187acf818b73564e6e6282709e9bcb5b63f5b23068356a15", size = 150667, upload-time = "2025-10-14T04:41:05.827Z" }, + { url = "https://files.pythonhosted.org/packages/e4/24/a41afeab6f990cf2daf6cb8c67419b63b48cf518e4f56022230840c9bfb2/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:af2d8c67d8e573d6de5bc30cdb27e9b95e49115cd9baad5ddbd1a6207aaa82a9", size = 160535, upload-time = "2025-10-14T04:41:06.938Z" }, + { url = "https://files.pythonhosted.org/packages/2a/e5/6a4ce77ed243c4a50a1fecca6aaaab419628c818a49434be428fe24c9957/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:780236ac706e66881f3b7f2f32dfe90507a09e67d1d454c762cf642e6e1586e0", size = 154816, upload-time = "2025-10-14T04:41:08.101Z" }, + { url = "https://files.pythonhosted.org/packages/a8/ef/89297262b8092b312d29cdb2517cb1237e51db8ecef2e9af5edbe7b683b1/charset_normalizer-3.4.4-cp312-cp312-win32.whl", hash = "sha256:5833d2c39d8896e4e19b689ffc198f08ea58116bee26dea51e362ecc7cd3ed26", size = 99694, upload-time = "2025-10-14T04:41:09.23Z" }, + { url = "https://files.pythonhosted.org/packages/3d/2d/1e5ed9dd3b3803994c155cd9aacb60c82c331bad84daf75bcb9c91b3295e/charset_normalizer-3.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:a79cfe37875f822425b89a82333404539ae63dbdddf97f84dcbc3d339aae9525", size = 107131, upload-time = "2025-10-14T04:41:10.467Z" }, + { url = 
"https://files.pythonhosted.org/packages/d0/d9/0ed4c7098a861482a7b6a95603edce4c0d9db2311af23da1fb2b75ec26fc/charset_normalizer-3.4.4-cp312-cp312-win_arm64.whl", hash = "sha256:376bec83a63b8021bb5c8ea75e21c4ccb86e7e45ca4eb81146091b56599b80c3", size = 100390, upload-time = "2025-10-14T04:41:11.915Z" }, + { url = "https://files.pythonhosted.org/packages/97/45/4b3a1239bbacd321068ea6e7ac28875b03ab8bc0aa0966452db17cd36714/charset_normalizer-3.4.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e1f185f86a6f3403aa2420e815904c67b2f9ebc443f045edd0de921108345794", size = 208091, upload-time = "2025-10-14T04:41:13.346Z" }, + { url = "https://files.pythonhosted.org/packages/7d/62/73a6d7450829655a35bb88a88fca7d736f9882a27eacdca2c6d505b57e2e/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b39f987ae8ccdf0d2642338faf2abb1862340facc796048b604ef14919e55ed", size = 147936, upload-time = "2025-10-14T04:41:14.461Z" }, + { url = "https://files.pythonhosted.org/packages/89/c5/adb8c8b3d6625bef6d88b251bbb0d95f8205831b987631ab0c8bb5d937c2/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3162d5d8ce1bb98dd51af660f2121c55d0fa541b46dff7bb9b9f86ea1d87de72", size = 144180, upload-time = "2025-10-14T04:41:15.588Z" }, + { url = "https://files.pythonhosted.org/packages/91/ed/9706e4070682d1cc219050b6048bfd293ccf67b3d4f5a4f39207453d4b99/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:81d5eb2a312700f4ecaa977a8235b634ce853200e828fbadf3a9c50bab278328", size = 161346, upload-time = "2025-10-14T04:41:16.738Z" }, + { url = "https://files.pythonhosted.org/packages/d5/0d/031f0d95e4972901a2f6f09ef055751805ff541511dc1252ba3ca1f80cf5/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5bd2293095d766545ec1a8f612559f6b40abc0eb18bb2f5d1171872d34036ede", size = 158874, upload-time = "2025-10-14T04:41:17.923Z" }, + { url = "https://files.pythonhosted.org/packages/f5/83/6ab5883f57c9c801ce5e5677242328aa45592be8a00644310a008d04f922/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8a8b89589086a25749f471e6a900d3f662d1d3b6e2e59dcecf787b1cc3a1894", size = 153076, upload-time = "2025-10-14T04:41:19.106Z" }, + { url = "https://files.pythonhosted.org/packages/75/1e/5ff781ddf5260e387d6419959ee89ef13878229732732ee73cdae01800f2/charset_normalizer-3.4.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc7637e2f80d8530ee4a78e878bce464f70087ce73cf7c1caf142416923b98f1", size = 150601, upload-time = "2025-10-14T04:41:20.245Z" }, + { url = "https://files.pythonhosted.org/packages/d7/57/71be810965493d3510a6ca79b90c19e48696fb1ff964da319334b12677f0/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f8bf04158c6b607d747e93949aa60618b61312fe647a6369f88ce2ff16043490", size = 150376, upload-time = "2025-10-14T04:41:21.398Z" }, + { url = "https://files.pythonhosted.org/packages/e5/d5/c3d057a78c181d007014feb7e9f2e65905a6c4ef182c0ddf0de2924edd65/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:554af85e960429cf30784dd47447d5125aaa3b99a6f0683589dbd27e2f45da44", size = 144825, upload-time = "2025-10-14T04:41:22.583Z" }, + { url = 
"https://files.pythonhosted.org/packages/e6/8c/d0406294828d4976f275ffbe66f00266c4b3136b7506941d87c00cab5272/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:74018750915ee7ad843a774364e13a3db91682f26142baddf775342c3f5b1133", size = 162583, upload-time = "2025-10-14T04:41:23.754Z" }, + { url = "https://files.pythonhosted.org/packages/d7/24/e2aa1f18c8f15c4c0e932d9287b8609dd30ad56dbe41d926bd846e22fb8d/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:c0463276121fdee9c49b98908b3a89c39be45d86d1dbaa22957e38f6321d4ce3", size = 150366, upload-time = "2025-10-14T04:41:25.27Z" }, + { url = "https://files.pythonhosted.org/packages/e4/5b/1e6160c7739aad1e2df054300cc618b06bf784a7a164b0f238360721ab86/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:362d61fd13843997c1c446760ef36f240cf81d3ebf74ac62652aebaf7838561e", size = 160300, upload-time = "2025-10-14T04:41:26.725Z" }, + { url = "https://files.pythonhosted.org/packages/7a/10/f882167cd207fbdd743e55534d5d9620e095089d176d55cb22d5322f2afd/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a26f18905b8dd5d685d6d07b0cdf98a79f3c7a918906af7cc143ea2e164c8bc", size = 154465, upload-time = "2025-10-14T04:41:28.322Z" }, + { url = "https://files.pythonhosted.org/packages/89/66/c7a9e1b7429be72123441bfdbaf2bc13faab3f90b933f664db506dea5915/charset_normalizer-3.4.4-cp313-cp313-win32.whl", hash = "sha256:9b35f4c90079ff2e2edc5b26c0c77925e5d2d255c42c74fdb70fb49b172726ac", size = 99404, upload-time = "2025-10-14T04:41:29.95Z" }, + { url = "https://files.pythonhosted.org/packages/c4/26/b9924fa27db384bdcd97ab83b4f0a8058d96ad9626ead570674d5e737d90/charset_normalizer-3.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:b435cba5f4f750aa6c0a0d92c541fb79f69a387c91e61f1795227e4ed9cece14", size = 107092, upload-time = "2025-10-14T04:41:31.188Z" }, + { url = "https://files.pythonhosted.org/packages/af/8f/3ed4bfa0c0c72a7ca17f0380cd9e4dd842b09f664e780c13cff1dcf2ef1b/charset_normalizer-3.4.4-cp313-cp313-win_arm64.whl", hash = "sha256:542d2cee80be6f80247095cc36c418f7bddd14f4a6de45af91dfad36d817bba2", size = 100408, upload-time = "2025-10-14T04:41:32.624Z" }, + { url = "https://files.pythonhosted.org/packages/2a/35/7051599bd493e62411d6ede36fd5af83a38f37c4767b92884df7301db25d/charset_normalizer-3.4.4-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:da3326d9e65ef63a817ecbcc0df6e94463713b754fe293eaa03da99befb9a5bd", size = 207746, upload-time = "2025-10-14T04:41:33.773Z" }, + { url = "https://files.pythonhosted.org/packages/10/9a/97c8d48ef10d6cd4fcead2415523221624bf58bcf68a802721a6bc807c8f/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8af65f14dc14a79b924524b1e7fffe304517b2bff5a58bf64f30b98bbc5079eb", size = 147889, upload-time = "2025-10-14T04:41:34.897Z" }, + { url = "https://files.pythonhosted.org/packages/10/bf/979224a919a1b606c82bd2c5fa49b5c6d5727aa47b4312bb27b1734f53cd/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74664978bb272435107de04e36db5a9735e78232b85b77d45cfb38f758efd33e", size = 143641, upload-time = "2025-10-14T04:41:36.116Z" }, + { url = "https://files.pythonhosted.org/packages/ba/33/0ad65587441fc730dc7bd90e9716b30b4702dc7b617e6ba4997dc8651495/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = 
"sha256:752944c7ffbfdd10c074dc58ec2d5a8a4cd9493b314d367c14d24c17684ddd14", size = 160779, upload-time = "2025-10-14T04:41:37.229Z" }, + { url = "https://files.pythonhosted.org/packages/67/ed/331d6b249259ee71ddea93f6f2f0a56cfebd46938bde6fcc6f7b9a3d0e09/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1f13550535ad8cff21b8d757a3257963e951d96e20ec82ab44bc64aeb62a191", size = 159035, upload-time = "2025-10-14T04:41:38.368Z" }, + { url = "https://files.pythonhosted.org/packages/67/ff/f6b948ca32e4f2a4576aa129d8bed61f2e0543bf9f5f2b7fc3758ed005c9/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ecaae4149d99b1c9e7b88bb03e3221956f68fd6d50be2ef061b2381b61d20838", size = 152542, upload-time = "2025-10-14T04:41:39.862Z" }, + { url = "https://files.pythonhosted.org/packages/16/85/276033dcbcc369eb176594de22728541a925b2632f9716428c851b149e83/charset_normalizer-3.4.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cb6254dc36b47a990e59e1068afacdcd02958bdcce30bb50cc1700a8b9d624a6", size = 149524, upload-time = "2025-10-14T04:41:41.319Z" }, + { url = "https://files.pythonhosted.org/packages/9e/f2/6a2a1f722b6aba37050e626530a46a68f74e63683947a8acff92569f979a/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c8ae8a0f02f57a6e61203a31428fa1d677cbe50c93622b4149d5c0f319c1d19e", size = 150395, upload-time = "2025-10-14T04:41:42.539Z" }, + { url = "https://files.pythonhosted.org/packages/60/bb/2186cb2f2bbaea6338cad15ce23a67f9b0672929744381e28b0592676824/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:47cc91b2f4dd2833fddaedd2893006b0106129d4b94fdb6af1f4ce5a9965577c", size = 143680, upload-time = "2025-10-14T04:41:43.661Z" }, + { url = "https://files.pythonhosted.org/packages/7d/a5/bf6f13b772fbb2a90360eb620d52ed8f796f3c5caee8398c3b2eb7b1c60d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:82004af6c302b5d3ab2cfc4cc5f29db16123b1a8417f2e25f9066f91d4411090", size = 162045, upload-time = "2025-10-14T04:41:44.821Z" }, + { url = "https://files.pythonhosted.org/packages/df/c5/d1be898bf0dc3ef9030c3825e5d3b83f2c528d207d246cbabe245966808d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b7d8f6c26245217bd2ad053761201e9f9680f8ce52f0fcd8d0755aeae5b2152", size = 149687, upload-time = "2025-10-14T04:41:46.442Z" }, + { url = "https://files.pythonhosted.org/packages/a5/42/90c1f7b9341eef50c8a1cb3f098ac43b0508413f33affd762855f67a410e/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:799a7a5e4fb2d5898c60b640fd4981d6a25f1c11790935a44ce38c54e985f828", size = 160014, upload-time = "2025-10-14T04:41:47.631Z" }, + { url = "https://files.pythonhosted.org/packages/76/be/4d3ee471e8145d12795ab655ece37baed0929462a86e72372fd25859047c/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:99ae2cffebb06e6c22bdc25801d7b30f503cc87dbd283479e7b606f70aff57ec", size = 154044, upload-time = "2025-10-14T04:41:48.81Z" }, + { url = "https://files.pythonhosted.org/packages/b0/6f/8f7af07237c34a1defe7defc565a9bc1807762f672c0fde711a4b22bf9c0/charset_normalizer-3.4.4-cp314-cp314-win32.whl", hash = "sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9", size = 99940, upload-time = "2025-10-14T04:41:49.946Z" }, + { url = 
"https://files.pythonhosted.org/packages/4b/51/8ade005e5ca5b0d80fb4aff72a3775b325bdc3d27408c8113811a7cbe640/charset_normalizer-3.4.4-cp314-cp314-win_amd64.whl", hash = "sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c", size = 107104, upload-time = "2025-10-14T04:41:51.051Z" }, + { url = "https://files.pythonhosted.org/packages/da/5f/6b8f83a55bb8278772c5ae54a577f3099025f9ade59d0136ac24a0df4bde/charset_normalizer-3.4.4-cp314-cp314-win_arm64.whl", hash = "sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2", size = 100743, upload-time = "2025-10-14T04:41:52.122Z" }, + { url = "https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" }, +] + [[package]] name = "click" version = "8.3.1" @@ -302,6 +404,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/0d/c3/e90f4a4feae6410f914f8ebac129b9ae7a8c92eb60a638012dde42030a9d/cryptography-46.0.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:6b5063083824e5509fdba180721d55909ffacccc8adbec85268b48439423d78c", size = 3438528, upload-time = "2025-10-15T23:18:26.227Z" }, ] +[[package]] +name = "deep-translator" +version = "1.11.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "beautifulsoup4" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7d/03/8fa7635c729a01de71151894cdf002ad6d245bfd6d1a731da864cf534dcf/deep_translator-1.11.4.tar.gz", hash = "sha256:801260c69231138707ea88a0955e484db7d40e210c9e0ae0f77372ffda5f4bf5", size = 36043, upload-time = "2023-06-28T19:55:23.499Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/38/3f/61a8ef73236dbea83a1a063a8af2f8e1e41a0df64f122233938391d0f175/deep_translator-1.11.4-py3-none-any.whl", hash = "sha256:d635df037e23fa35d12fd42dab72a0b55c9dd19e6292009ee7207e3f30b9e60a", size = 42285, upload-time = "2023-06-28T19:55:20.928Z" }, +] + [[package]] name = "distlib" version = "0.4.0" @@ -361,6 +476,44 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/3c/4e/230bc6b366dd2217e0b7681b5ace14a3fb4aec12bedb5666f33e19375cc7/flake9-3.8.3.post2-py3-none-any.whl", hash = "sha256:47dced969a802a8892740bcaa35ae07232709b2ade803c45f48dd03ccb7f825f", size = 73780, upload-time = "2020-06-19T08:19:09.437Z" }, ] +[[package]] +name = "google-auth" +version = "2.43.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cachetools" }, + { name = "pyasn1-modules" }, + { name = "rsa" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ff/ef/66d14cf0e01b08d2d51ffc3c20410c4e134a1548fc246a6081eae585a4fe/google_auth-2.43.0.tar.gz", hash = "sha256:88228eee5fc21b62a1b5fe773ca15e67778cb07dc8363adcb4a8827b52d81483", size = 296359, upload-time = "2025-11-06T00:13:36.587Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6f/d1/385110a9ae86d91cc14c5282c61fe9f4dc41c0b9f7d423c6ad77038c4448/google_auth-2.43.0-py2.py3-none-any.whl", hash = "sha256:af628ba6fa493f75c7e9dbe9373d148ca9f4399b5ea29976519e0a3848eddd16", size = 223114, upload-time = "2025-11-06T00:13:35.209Z" }, +] + +[package.optional-dependencies] +requests = [ + { name = "requests" }, +] + +[[package]] +name = "google-genai" +version = "1.54.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "google-auth", 
extra = ["requests"] }, + { name = "httpx" }, + { name = "pydantic" }, + { name = "requests" }, + { name = "tenacity" }, + { name = "typing-extensions" }, + { name = "websockets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e0/5d/0b8305a034db5ffcaf99d0842a0d941e01851c1c3806c68fb43723837c72/google_genai-1.54.0.tar.gz", hash = "sha256:ab7de6741437ee17f01d4db85e351eb8504466663cd83ce420ecb4e29b58b00d", size = 260467, upload-time = "2025-12-08T19:03:13.972Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5c/93/7096cdc1a4a55cc60bc02638f7077255acd32968c437cc32783e5abe430d/google_genai-1.54.0-py3-none-any.whl", hash = "sha256:c06853402814a47bb020f2dc50fc03fb77cc349dff65da35cddbd19046f9bd58", size = 262359, upload-time = "2025-12-08T19:03:12.337Z" }, +] + [[package]] name = "h11" version = "0.16.0" @@ -370,6 +523,28 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, ] +[[package]] +name = "h2" +version = "4.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "hpack" }, + { name = "hyperframe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1d/17/afa56379f94ad0fe8defd37d6eb3f89a25404ffc71d4d848893d270325fc/h2-4.3.0.tar.gz", hash = "sha256:6c59efe4323fa18b47a632221a1888bd7fde6249819beda254aeca909f221bf1", size = 2152026, upload-time = "2025-08-23T18:12:19.778Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/69/b2/119f6e6dcbd96f9069ce9a2665e0146588dc9f88f29549711853645e736a/h2-4.3.0-py3-none-any.whl", hash = "sha256:c438f029a25f7945c69e0ccf0fb951dc3f73a5f6412981daee861431b70e2bdd", size = 61779, upload-time = "2025-08-23T18:12:17.779Z" }, +] + +[[package]] +name = "hpack" +version = "4.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2c/48/71de9ed269fdae9c8057e5a4c0aa7402e8bb16f2c6e90b3aa53327b113f8/hpack-4.1.0.tar.gz", hash = "sha256:ec5eca154f7056aa06f196a557655c5b009b382873ac8d1e66e79e87535f1dca", size = 51276, upload-time = "2025-01-22T21:44:58.347Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/07/c6/80c95b1b2b94682a72cbdbfb85b81ae2daffa4291fbfa1b1464502ede10d/hpack-4.1.0-py3-none-any.whl", hash = "sha256:157ac792668d995c657d93111f46b4535ed114f0c9c8d672271bbec7eae1b496", size = 34357, upload-time = "2025-01-22T21:44:56.92Z" }, +] + [[package]] name = "httpcore" version = "1.0.9" @@ -441,6 +616,20 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" }, ] +[package.optional-dependencies] +http2 = [ + { name = "h2" }, +] + +[[package]] +name = "hyperframe" +version = "6.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/02/e7/94f8232d4a74cc99514c13a9f995811485a6903d48e5d952771ef6322e30/hyperframe-6.1.0.tar.gz", hash = "sha256:f630908a00854a7adeabd6382b43923a4c4cd4b821fcb527e6ab9e15382a3b08", size = 26566, upload-time = "2025-01-22T21:41:49.302Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/48/30/47d0bf6072f7252e6521f3447ccfa40b421b6824517f82854703d0f5a98b/hyperframe-6.1.0-py3-none-any.whl", hash = "sha256:b03380493a519fce58ea5af42e4a42317bf9bd425596f7a0835ffce80f1a42e5", size = 13007, upload-time = "2025-01-22T21:41:47.295Z" }, +] + [[package]] name = "identify" version = "2.6.15" @@ -542,6 +731,27 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/27/11/574fe7d13acf30bfd0a8dd7fa1647040f2b8064f13f43e8c963b1e65093b/pre_commit-4.4.0-py2.py3-none-any.whl", hash = "sha256:b35ea52957cbf83dcc5d8ee636cbead8624e3a15fbfa61a370e42158ac8a5813", size = 226049, upload-time = "2025-11-08T21:12:10.228Z" }, ] +[[package]] +name = "pyasn1" +version = "0.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 145322, upload-time = "2024-09-10T22:41:42.55Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135, upload-time = "2024-09-11T16:00:36.122Z" }, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e9/e6/78ebbb10a8c8e4b61a59249394a4a594c1a7af95593dc933a349c8d00964/pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6", size = 307892, upload-time = "2025-03-28T02:41:22.17Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/47/8d/d529b5d697919ba8c11ad626e835d4039be708a35b0d22de83a269a6682c/pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a", size = 181259, upload-time = "2025-03-28T02:41:19.028Z" }, +] + [[package]] name = "pycodestyle" version = "2.6.0" @@ -810,6 +1020,33 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/89/f0/8956f8a86b20d7bb9d6ac0187cf4cd54d8065bc9a1a09eb8011d4d326596/redis-7.1.0-py3-none-any.whl", hash = "sha256:23c52b208f92b56103e17c5d06bdc1a6c2c0b3106583985a76a18f83b265de2b", size = 354159, upload-time = "2025-11-19T15:54:38.064Z" }, ] +[[package]] +name = "requests" +version = "2.32.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c9/74/b3ff8e6c8446842c3f5c837e9c3dfcfe2018ea6ecef224c710c85ef728f4/requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf", size = 134517, upload-time = "2025-08-18T20:46:02.573Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" }, +] + +[[package]] +name = "rsa" +version = "4.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/da/8a/22b7beea3ee0d44b1916c0c1cb0ee3af23b700b6da9f04991899d0c555d4/rsa-4.9.1.tar.gz", hash = "sha256:e7bdbfdb5497da4c07dfd35530e1a902659db6ff241e39d9953cad06ebd0ae75", size = 29034, upload-time = "2025-04-16T09:51:18.218Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/64/8d/0133e4eb4beed9e425d9a98ed6e081a55d195481b7632472be1af08d2f6b/rsa-4.9.1-py3-none-any.whl", hash = "sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762", size = 34696, upload-time = "2025-04-16T09:51:17.142Z" }, +] + [[package]] name = "sniffio" version = "1.3.1" @@ -819,6 +1056,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" }, ] +[[package]] +name = "soupsieve" +version = "2.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6d/e6/21ccce3262dd4889aa3332e5a119a3491a95e8f60939870a3a035aabac0d/soupsieve-2.8.tar.gz", hash = "sha256:e2dd4a40a628cb5f28f6d4b0db8800b8f581b65bb380b97de22ba5ca8d72572f", size = 103472, upload-time = "2025-08-27T15:39:51.78Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/14/a0/bb38d3b76b8cae341dad93a2dd83ab7462e6dbcdd84d43f54ee60a8dc167/soupsieve-2.8-py3-none-any.whl", hash = "sha256:0cc76456a30e20f5d7f2e14a98a4ae2ee4e5abdc7c5ea0aafe795f344bc7984c", size = 36679, upload-time = "2025-08-27T15:39:50.179Z" }, +] + [[package]] name = "starlette" version = "0.49.3" @@ -832,6 +1078,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a3/e0/021c772d6a662f43b63044ab481dc6ac7592447605b5b35a957785363122/starlette-0.49.3-py3-none-any.whl", hash = "sha256:b579b99715fdc2980cf88c8ec96d3bf1ce16f5a8051a7c2b84ef9b1cdecaea2f", size = 74340, upload-time = "2025-11-01T15:12:24.387Z" }, ] +[[package]] +name = "tenacity" +version = "9.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0a/d4/2b0cd0fe285e14b36db076e78c93766ff1d529d70408bd1d2a5a84f1d929/tenacity-9.1.2.tar.gz", hash = "sha256:1169d376c297e7de388d18b4481760d478b0e99a777cad3a9c86e556f4b697cb", size = 48036, upload-time = "2025-04-02T08:25:09.966Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/30/643397144bfbfec6f6ef821f36f33e57d35946c44a2352d3c9f0ae847619/tenacity-9.1.2-py3-none-any.whl", hash = "sha256:f77bf36710d8b73a50b2dd155c97b870017ad21afe6ab300326b0371b3b05138", size = 28248, upload-time = "2025-04-02T08:25:07.678Z" }, +] + [[package]] name = "tomli" version = "2.3.0" @@ -923,6 +1178,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/c2/14/e2a54fabd4f08cd7af1c07030603c3356b74da07f7cc056e600436edfa17/tzlocal-5.3.1-py3-none-any.whl", hash = "sha256:eb1a66c3ef5847adf7a834f1be0800581b683b5608e74f86ecbcef8ab91bb85d", size = 18026, upload-time = "2025-03-05T21:17:39.857Z" }, ] +[[package]] +name = "urllib3" +version = "2.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1c/43/554c2569b62f49350597348fc3ac70f786e3c32e7f19d266e19817812dd3/urllib3-2.6.0.tar.gz", hash = "sha256:cb9bcef5a4b345d5da5d145dc3e30834f58e8018828cbc724d30b4cb7d4d49f1", size = 432585, upload-time = "2025-12-05T15:08:47.885Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/56/1a/9ffe814d317c5224166b23e7c47f606d6e473712a2fad0f704ea9b99f246/urllib3-2.6.0-py3-none-any.whl", hash = "sha256:c90f7a39f716c572c4e3e58509581ebd83f9b59cced005b7db7ad2d22b0db99f", size = 131083, upload-time = "2025-12-05T15:08:45.983Z" }, +] + [[package]] name = "uvicorn" version = "0.38.0" @@ -1112,19 +1376,21 @@ wheels = [ [[package]] name = "watchly" -version = "0.1.0" source = { virtual = "." } dependencies = [ { name = "apscheduler" }, { name = "async-lru" }, { name = "cachetools" }, { name = "cryptography" }, + { name = "deep-translator" }, { name = "fastapi" }, - { name = "httpx" }, + { name = "google-genai" }, + { name = "httpx", extra = ["http2"] }, { name = "loguru" }, { name = "pydantic" }, { name = "pydantic-settings" }, { name = "redis" }, + { name = "tomli" }, { name = "uvicorn", extra = ["standard"] }, ] @@ -1141,12 +1407,15 @@ requires-dist = [ { name = "async-lru", specifier = ">=2.0.5" }, { name = "cachetools", specifier = ">=6.2.2" }, { name = "cryptography", specifier = ">=46.0.3" }, + { name = "deep-translator", specifier = ">=1.11.4" }, { name = "fastapi", specifier = ">=0.104.1" }, - { name = "httpx", specifier = ">=0.25.2" }, + { name = "google-genai", specifier = ">=1.54.0" }, + { name = "httpx", extras = ["http2"], specifier = ">=0.25.2" }, { name = "loguru", specifier = ">=0.7.2" }, { name = "pydantic", specifier = ">=2.5.0" }, { name = "pydantic-settings", specifier = ">=2.1.0" }, { name = "redis", specifier = ">=5.0.1" }, + { name = "tomli", specifier = ">=2.3.0" }, { name = "uvicorn", extras = ["standard"], specifier = ">=0.24.0" }, ]