diff --git a/app/api/endpoints/announcement.py b/app/api/endpoints/announcement.py
index 5f31df1..19bce86 100644
--- a/app/api/endpoints/announcement.py
+++ b/app/api/endpoints/announcement.py
@@ -1,6 +1,6 @@
from fastapi import APIRouter
-from app.config import settings
+from app.core.config import settings
router = APIRouter(prefix="/announcement", tags=["announcement"])
diff --git a/app/api/endpoints/catalogs.py b/app/api/endpoints/catalogs.py
index 352cf08..34761d4 100644
--- a/app/api/endpoints/catalogs.py
+++ b/app/api/endpoints/catalogs.py
@@ -11,7 +11,7 @@
from app.services.token_store import token_store
MAX_RESULTS = 50
-SOURCE_ITEMS_LIMIT = 10
+SOURCE_ITEMS_LIMIT = 15
router = APIRouter()
diff --git a/app/api/endpoints/manifest.py b/app/api/endpoints/manifest.py
index ee6b8f7..6443787 100644
--- a/app/api/endpoints/manifest.py
+++ b/app/api/endpoints/manifest.py
@@ -137,7 +137,7 @@ async def _manifest_handler(response: Response, token: str):
@router.get("/manifest.json")
async def manifest():
- manifest = await get_base_manifest()
+ manifest = get_base_manifest()
# since user is not logged in, return empty catalogs
manifest["catalogs"] = []
return manifest
diff --git a/app/api/main.py b/app/api/main.py
index cf7f4e1..c0fd953 100644
--- a/app/api/main.py
+++ b/app/api/main.py
@@ -1,5 +1,6 @@
from fastapi import APIRouter
+from .endpoints.announcement import router as announcement_router
from .endpoints.catalogs import router as catalogs_router
from .endpoints.health import router as health_router
from .endpoints.manifest import router as manifest_router
@@ -19,3 +20,4 @@ async def root():
api_router.include_router(tokens_router)
api_router.include_router(health_router)
api_router.include_router(meta_router)
+api_router.include_router(announcement_router)
diff --git a/app/core/app.py b/app/core/app.py
index 85c8c53..bdff5f7 100644
--- a/app/core/app.py
+++ b/app/core/app.py
@@ -38,7 +38,16 @@ async def lifespan(app: FastAPI):
Manage application lifespan events (startup/shutdown).
"""
global catalog_updater
- asyncio.create_task(migrate_tokens())
+ task = asyncio.create_task(migrate_tokens())
+
+ # Ensure background exceptions are surfaced in logs
+ def _on_done(t: asyncio.Task):
+ try:
+ t.result()
+ except Exception as exc:
+ logger.error(f"migrate_tokens background task failed: {exc}")
+
+ task.add_done_callback(_on_done)
# Startup
if settings.AUTO_UPDATE_CATALOGS:
diff --git a/app/services/catalog.py b/app/services/catalog.py
index 9740ccf..17dbc8e 100644
--- a/app/services/catalog.py
+++ b/app/services/catalog.py
@@ -1,3 +1,5 @@
+from datetime import datetime, timezone
+
from app.core.settings import CatalogConfig, UserSettings
from app.services.row_generator import RowGeneratorService
from app.services.scoring import ScoringService
@@ -130,19 +132,24 @@ async def get_dynamic_catalogs(
return catalogs
async def _add_item_based_rows(
- self, catalogs: list, library_items: dict, content_type: str, language: str, loved_config, watched_config
+ self,
+ catalogs: list,
+ library_items: dict,
+ content_type: str,
+ language: str,
+ loved_config,
+ watched_config,
):
"""Helper to add 'Because you watched' and 'More like' rows."""
# Helper to parse date
def get_date(item):
- import datetime
val = item.get("state", {}).get("lastWatched")
if val:
try:
if isinstance(val, str):
- return datetime.datetime.fromisoformat(val.replace("Z", "+00:00"))
+ return datetime.fromisoformat(val.replace("Z", "+00:00"))
return val
except (ValueError, TypeError):
pass
@@ -150,10 +157,10 @@ def get_date(item):
val = item.get("_mtime")
if val:
try:
- return datetime.datetime.fromisoformat(str(val).replace("Z", "+00:00"))
+ return datetime.fromisoformat(str(val).replace("Z", "+00:00"))
except (ValueError, TypeError):
pass
- return datetime.datetime.min.replace(tzinfo=datetime.UTC)
+ return datetime.min.replace(tzinfo=timezone.utc)
# 1. More Like
last_loved = None # Initialize for the watched check
diff --git a/app/services/catalog_updater.py b/app/services/catalog_updater.py
index 2400195..53a5db3 100644
--- a/app/services/catalog_updater.py
+++ b/app/services/catalog_updater.py
@@ -31,7 +31,8 @@ async def refresh_catalogs_for_credentials(token: str, credentials: dict[str, An
addon_installed = await stremio_service.is_addon_installed(auth_key)
if not addon_installed:
logger.info(f"[{redact_token(token)}] User has not installed addon. Removing token from redis")
- await token_store.delete_token(key=token)
+ # Ensure we delete by token, not by raw Redis key
+ await token_store.delete_token(token=token)
return True
except Exception as e:
logger.exception(f"[{redact_token(token)}] Failed to check if addon is installed: {e}")
@@ -41,6 +42,7 @@ async def refresh_catalogs_for_credentials(token: str, credentials: dict[str, An
dynamic_catalog_service = DynamicCatalogService(stremio_service=stremio_service)
# Ensure user_settings is available
+ user_settings = get_default_settings()
if credentials.get("settings"):
try:
user_settings = UserSettings(**credentials["settings"])
@@ -140,7 +142,10 @@ async def _update_safe(key: str, payload: dict[str, Any]) -> None:
try:
async for key, payload in token_store.iter_payloads():
- tasks.append(asyncio.create_task(_update_safe(key, payload)))
+            # Strip the Redis key prefix to recover the raw token
+ prefix = token_store.KEY_PREFIX
+ tok = key[len(prefix) :] if key.startswith(prefix) else key # noqa
+ tasks.append(asyncio.create_task(_update_safe(tok, payload)))
if tasks:
logger.info(f"Starting background refresh for {len(tasks)} tokens...")
diff --git a/app/services/discovery.py b/app/services/discovery.py
index e56b3f2..b52d485 100644
--- a/app/services/discovery.py
+++ b/app/services/discovery.py
@@ -12,6 +12,8 @@ class DiscoveryEngine:
def __init__(self):
self.tmdb_service = TMDBService()
+ # Limit concurrent discovery calls to avoid rate limiting
+ self._sem = asyncio.Semaphore(10)
async def discover_recommendations(
self,
@@ -63,7 +65,7 @@ async def discover_recommendations(
for i in range(2):
params_rating = {
"with_genres": genre_ids,
- "sort_by": "ratings.desc",
+ "sort_by": "vote_average.desc",
"vote_count.gte": 500,
"page": i + 1,
**base_params,
@@ -85,7 +87,7 @@ async def discover_recommendations(
for i in range(3):
params_rating = {
"with_keywords": keyword_ids,
- "sort_by": "ratings.desc",
+ "sort_by": "vote_average.desc",
"vote_count.gte": 500,
"page": i + 1,
**base_params,
@@ -105,7 +107,7 @@ async def discover_recommendations(
params_rating = {
"with_cast": str(actor_id),
- "sort_by": "ratings.desc",
+ "sort_by": "vote_average.desc",
"vote_count.gte": 500,
**base_params,
}
@@ -124,7 +126,7 @@ async def discover_recommendations(
params_rating = {
"with_crew": str(director_id),
- "sort_by": "ratings.desc",
+ "sort_by": "vote_average.desc",
"vote_count.gte": 500,
**base_params,
}
@@ -143,7 +145,7 @@ async def discover_recommendations(
params_rating = {
"with_origin_country": country_ids,
- "sort_by": "ratings.desc",
+ "sort_by": "vote_average.desc",
"vote_count.gte": 300,
**base_params,
}
@@ -154,11 +156,11 @@ async def discover_recommendations(
year = top_year[0][0]
# we store year in 10 years bucket
start_year = f"{year}-01-01"
- end_year = f"{int(year) + 10}-12-31"
+ end_year = f"{int(year) + 9}-12-31"
params_rating = {
"primary_release_date.gte": start_year,
"primary_release_date.lte": end_year,
- "sort_by": "ratings.desc",
+ "sort_by": "vote_average.desc",
"vote_count.gte": 500,
**base_params,
}
@@ -181,7 +183,8 @@ async def discover_recommendations(
async def _fetch_discovery(self, media_type: str, params: dict) -> list[dict]:
"""Helper to call TMDB discovery."""
try:
- data = await self.tmdb_service.get_discover(media_type, **params)
- return data.get("results", [])
+ async with self._sem:
+ data = await self.tmdb_service.get_discover(media_type, **params)
+ return data.get("results", [])
except Exception:
return []
diff --git a/app/services/gemini.py b/app/services/gemini.py
index 079f7c3..8a96f7d 100644
--- a/app/services/gemini.py
+++ b/app/services/gemini.py
@@ -1,3 +1,5 @@
+import asyncio
+
from google import genai
from loguru import logger
@@ -52,5 +54,10 @@ def generate_content(self, prompt: str) -> str:
logger.error(f"Error generating content: {e}")
return ""
+ async def generate_content_async(self, prompt: str) -> str:
+ """Async wrapper to avoid blocking the event loop during network calls."""
+ loop = asyncio.get_running_loop()
+ return await loop.run_in_executor(None, lambda: self.generate_content(prompt))
+
gemini_service = GeminiService()
diff --git a/app/services/recommendation_service.py b/app/services/recommendation_service.py
index 9409d19..a11170b 100644
--- a/app/services/recommendation_service.py
+++ b/app/services/recommendation_service.py
@@ -1,4 +1,5 @@
import asyncio
+import random
from urllib.parse import unquote
from loguru import logger
@@ -72,12 +73,18 @@ def __init__(
async def _get_exclusion_sets(self, content_type: str | None = None) -> tuple[set[str], set[int]]:
"""
Fetch library items and build strict exclusion sets for watched content.
+        Also exclude items the user has added to their library, to avoid recommending duplicates.
Returns (watched_imdb_ids, watched_tmdb_ids)
"""
# Always fetch fresh library to ensure we don't recommend what was just watched
library_data = await self.stremio_service.get_library_items()
- # Combine loved and watched - both implies user has seen/interacted
- all_items = library_data.get("loved", []) + library_data.get("watched", [])
+ # Combine loved, watched, added, and removed (added/removed treated as exclude-only)
+ all_items = (
+ library_data.get("loved", [])
+ + library_data.get("watched", [])
+ + library_data.get("added", [])
+ + library_data.get("removed", [])
+ )
imdb_ids = set()
tmdb_ids = set()
@@ -115,9 +122,7 @@ async def _filter_candidates(
for item in candidates:
tmdb_id = item.get("id")
# 1. Check TMDB ID (Fast)
- if tmdb_id and (
- tmdb_id in watched_tmdb_ids or f"tmdb:{tmdb_id}" in watched_imdb_ids
- ): # check both sets just in case
+ if tmdb_id and tmdb_id in watched_tmdb_ids:
continue
# 2. Check external IDs (if present in candidate)
@@ -137,12 +142,15 @@ async def _fetch_metadata_for_items(self, items: list[dict], media_type: str) ->
# Ensure media_type is correct
query_media_type = "movie" if media_type == "movie" else "tv"
+ sem = asyncio.Semaphore(30)
+
async def _fetch_details(tmdb_id: int):
try:
- if query_media_type == "movie":
- return await self.tmdb_service.get_movie_details(tmdb_id)
- else:
- return await self.tmdb_service.get_tv_details(tmdb_id)
+ async with sem:
+ if query_media_type == "movie":
+ return await self.tmdb_service.get_movie_details(tmdb_id)
+ else:
+ return await self.tmdb_service.get_tv_details(tmdb_id)
except Exception as e:
logger.warning(f"Failed to fetch details for TMDB ID {tmdb_id}: {e}")
return None
@@ -337,11 +345,6 @@ async def get_recommendations_for_theme(self, theme_id: str, content_type: str,
# Apply Excluded Genres
excluded_ids = self._get_excluded_genre_ids(content_type)
if excluded_ids:
- # If with_genres is specified, we technically shouldn't exclude what is explicitly asked for?
- # But the user asked to "exclude those genres".
- # If I exclude them from "without_genres", TMDB might return 0 results if the theme IS that genre.
- # But RowGenerator safeguards against generating themes for excluded genres.
- # So this is safe for keyword/country rows.
params["without_genres"] = "|".join(str(g) for g in excluded_ids)
# Fetch
@@ -364,7 +367,7 @@ async def get_recommendations_for_theme(self, theme_id: str, content_type: str,
item.pop("_external_ids", None)
final_items.append(item)
- return final_items[:limit]
+ return final_items
async def _fetch_recommendations_from_tmdb(self, item_id: str, media_type: str, limit: int) -> list[dict]:
"""
@@ -381,10 +384,30 @@ async def _fetch_recommendations_from_tmdb(self, item_id: str, media_type: str,
media_type = detected_type
elif item_id.startswith("tmdb:"):
tmdb_id = int(item_id.split(":")[1])
+ # Detect media_type if unknown or invalid
+ if media_type not in ("movie", "tv", "series"):
+ detected_type = None
+ try:
+ details = await self.tmdb_service.get_movie_details(tmdb_id)
+ if details:
+ detected_type = "movie"
+ except Exception:
+ pass
+ if not detected_type:
+ try:
+ details = await self.tmdb_service.get_tv_details(tmdb_id)
+ if details:
+ detected_type = "tv"
+ except Exception:
+ pass
+ if detected_type:
+ media_type = detected_type
else:
tmdb_id = item_id
- recommendation_response = await self.tmdb_service.get_recommendations(tmdb_id, media_type)
+ # Normalize series alias
+ mtype = "tv" if media_type in ("tv", "series") else "movie"
+ recommendation_response = await self.tmdb_service.get_recommendations(tmdb_id, mtype)
recommended_items = recommendation_response.get("results", [])
if not recommended_items:
return []
@@ -407,7 +430,11 @@ async def get_recommendations(
# Step 1: Fetch & Score User Library
library_data = await self.stremio_service.get_library_items()
- all_items = library_data.get("loved", []) + library_data.get("watched", [])
+ all_items = library_data.get("loved", []) + library_data.get("watched", []) + library_data.get("added", [])
+ logger.info(f"processing {len(all_items)} Items.")
+ # Cold-start fallback remains (redundant safety)
+ if not all_items:
+ all_items = library_data.get("added", [])
# Build Exclusion Sets explicitly
watched_imdb_ids, watched_tmdb_ids = await self._get_exclusion_sets()
@@ -417,9 +444,10 @@ async def get_recommendations(
processed_items = []
scored_objects = []
- # OPTIMIZATION: Limit source items for profile building to recent history (last 30 items)
- sorted_history = sorted(unique_items.values(), key=lambda x: x.get("_mtime", ""), reverse=True)
- recent_history = sorted_history[:30]
+ sorted_history = sorted(
+            unique_items.values(), key=lambda x: x.get("state", {}).get("lastWatched") or "", reverse=True
+ )
+ recent_history = sorted_history[:source_items_limit]
for item_data in recent_history:
scored_obj = self.scoring_service.process_item(item_data)
@@ -485,16 +513,71 @@ async def get_recommendations(
final_score = (sim_score * 0.6) + (vote_score * 0.3) + (pop_score * 0.1)
+ # Add tiny jitter to promote freshness and avoid static ordering
+ jitter = random.uniform(-0.02, 0.02) # +/-2%
+ final_score = final_score * (1 + jitter)
+
# Boost candidate if its from tmdb collaborative recommendations
if item.get("_ranked_candidate"):
final_score *= 1.25
ranked_candidates.append((final_score, item))
- # Sort by Final Score
+ # Sort by Final Score and cache score on item for diversification
ranked_candidates.sort(key=lambda x: x[0], reverse=True)
+ for score, item in ranked_candidates:
+ item["_final_score"] = score
+
+ # Diversify with MMR to avoid shallow, repetitive picks
+ def _jaccard(a: set, b: set) -> float:
+ if not a and not b:
+ return 0.0
+ inter = len(a & b)
+ union = len(a | b)
+ return inter / union if union else 0.0
+
+ def _candidate_similarity(x: dict, y: dict) -> float:
+ gx = set(x.get("genre_ids") or [])
+ gy = set(y.get("genre_ids") or [])
+ s = _jaccard(gx, gy)
+ # Mild penalty if same language to encourage variety
+ lx = x.get("original_language")
+ ly = y.get("original_language")
+ if lx and ly and lx == ly:
+ s += 0.05
+ return min(s, 1.0)
+
+ def _mmr_select(cands: list[dict], k: int, lamb: float = 0.75) -> list[dict]:
+ selected: list[dict] = []
+ remaining = cands[:]
+ while remaining and len(selected) < k:
+ if not selected:
+ best = remaining.pop(0)
+ selected.append(best)
+ continue
+ best_item = None
+ best_score = float("-inf")
+ for cand in remaining[:50]: # evaluate a window for speed
+ rel = cand.get("_final_score", 0.0)
+ div = 0.0
+ for s in selected:
+ div = max(div, _candidate_similarity(cand, s))
+ mmr = lamb * rel - (1 - lamb) * div
+ if mmr > best_score:
+ best_score = mmr
+ best_item = cand
+ if best_item is None:
+ break
+ selected.append(best_item)
+ try:
+ remaining.remove(best_item)
+ except ValueError:
+ pass
+ return selected
- # Select with buffer for final IMDB filtering
- buffer_selection = [item for score, item in ranked_candidates[: max_results * 2]]
+ top_ranked_items = [item for _, item in ranked_candidates]
+ diversified = _mmr_select(top_ranked_items, k=max_results * 2, lamb=0.75)
+ # Select with buffer for final IMDB filtering after diversification
+ buffer_selection = diversified
# Fetch Full Metadata
meta_items = await self._fetch_metadata_for_items(buffer_selection, content_type)
@@ -510,7 +593,5 @@ async def get_recommendations(
item.pop("_external_ids", None)
final_items.append(item)
- if len(final_items) >= max_results:
- break
return final_items
diff --git a/app/services/row_generator.py b/app/services/row_generator.py
index 71cec06..3ba331e 100644
--- a/app/services/row_generator.py
+++ b/app/services/row_generator.py
@@ -74,7 +74,7 @@ def get_cname(code):
kw_name2 = await self._get_keyword_name(k_id2)
title = ""
if kw_name1 and kw_name2:
- title = gemini_service.generate_content(f"Keywords: {kw_name1} + {kw_name2}")
+ title = await gemini_service.generate_content_async(f"Keywords: {kw_name1} + {kw_name2}")
if title:
rows.append(
@@ -104,7 +104,7 @@ def get_cname(code):
if k_id:
kw_name = await self._get_keyword_name(k_id)
if kw_name:
- title = gemini_service.generate_content(
+ title = await gemini_service.generate_content_async(
f"Genre: {get_gname(g_id)} + Keyword: {normalize_keyword(kw_name)}"
)
if not title:
@@ -127,7 +127,7 @@ def get_cname(code):
c_code = top_countries[0][0]
c_adj = get_cname(c_code)
if c_adj:
- title = gemini_service.generate_content(f"Genre: {get_gname(g_id)} + Country: {c_adj}")
+ title = await gemini_service.generate_content_async(f"Genre: {get_gname(g_id)} + Country: {c_adj}")
if not title:
title = f"{get_gname(g_id)} {c_adj}"
rows.append(
@@ -150,7 +150,7 @@ def get_cname(code):
# # Only do this if decade is valid and somewhat old (nostalgia factor)
if 1970 <= decade_start <= 2010:
decade_str = str(decade_start)[2:] + "s" # "90s"
- title = gemini_service.generate_content(f"Genre: {get_gname(g_id)} + Era: {decade_str}")
+ title = await gemini_service.generate_content_async(f"Genre: {get_gname(g_id)} + Era: {decade_str}")
if not title:
title = f"{get_gname(g_id)} {decade_str}"
rows.append(
diff --git a/app/services/scoring.py b/app/services/scoring.py
index aba9c2d..c13b80b 100644
--- a/app/services/scoring.py
+++ b/app/services/scoring.py
@@ -13,11 +13,11 @@ class ScoringService:
# Rewatch, How many times? Watched but duration?? What if user stopped watching in middle?
# Weights for different factors
- WEIGHT_WATCH_PERCENTAGE = 0.25
- WEIGHT_REWATCH = 0.20
- WEIGHT_RECENCY = 0.20
- WEIGHT_EXPLICIT_RATING = 0.3
- ADDED_TO_LIBRARY_WEIGHT = 0.05
+ WEIGHT_WATCH_PERCENTAGE = 0.10
+ WEIGHT_REWATCH = 0.17
+ WEIGHT_RECENCY = 0.30
+ WEIGHT_EXPLICIT_RATING = 0.35
+ ADDED_TO_LIBRARY_WEIGHT = 0.08
def process_item(self, raw_item: dict) -> ScoredItem:
"""
@@ -64,21 +64,68 @@ def _calculate_score_components(self, item: StremioLibraryItem) -> dict:
completion_score = 0.0
completion_rate = 0.0
- if state.duration > 0:
- ratio = min(state.timeWatched / state.duration, 1.0)
- completion_score = ratio * 100
+ # Prefer ratio-based completion when duration is available to avoid
+ # treating short partial plays as full completion just because
+ # `timesWatched` was incremented. If duration is missing, fall back
+ # to conservative estimates based on timesWatched/flaggedWatched.
+ if state.duration and state.duration > 0:
+ try:
+ ratio = min(float(state.timeWatched) / float(state.duration), 1.0)
+ except Exception:
+ ratio = 0.0
completion_rate = ratio
+ completion_score = ratio * 100.0
+
+ # If the item was explicitly marked watched or has timesWatched but
+ # the observed ratio is very small, give a modest boost (not full 100).
+ if (state.timesWatched > 0 or state.flaggedWatched > 0) and completion_score < 50.0:
+ completion_score = max(completion_score, 50.0)
+ completion_rate = max(completion_rate, 0.5)
elif state.timesWatched > 0 or state.flaggedWatched > 0:
- completion_score = 100.0
- completion_rate = 1.0
+ # No duration information: use a conservative assumed completion.
+ completion_score = 80.0
+ completion_rate = 0.8
# 2. Rewatch Bonus
+        # We compute rewatch strength from two complementary metrics:
+        #   - times-based: how many extra explicit watches the user logged (timesWatched - 1)
+        #   - ratio-based: overallTimeWatched / duration, i.e. how many full-length viewings the total watch time represents
+        # If duration is missing, we fall back to conservative estimates to avoid false positives.
rewatch_score = 0.0
is_rewatched = False
- if state.timesWatched > 1:
- rewatch_score = min((state.timesWatched - 1) * 50, 100)
+ if state.timesWatched > 1 and not state.flaggedWatched:
is_rewatched = True
+ # times-based component (each extra watch gives a boost)
+ times_component = (state.timesWatched - 1) * 50
+
+ # ratio-based component: how many full durations the user has watched in total
+ ratio_component = 0.0
+ try:
+ overall_timewatched = float(state.overallTimeWatched or 0)
+ duration = float(state.duration or 0)
+ if duration > 0 and overall_timewatched > 0:
+ watch_ratio = overall_timewatched / duration
+ ratio_component = max((watch_ratio - 1.0) * 100.0, 0.0)
+ else:
+ # If duration is missing, be conservative: estimate based on timeWatched
+ # If timeWatched exists, assume it approximates one viewing; otherwise use timesWatched
+ time_watched = float(state.timeWatched or 0)
+ if time_watched > 0:
+ # assume a single-view baseline equal to time_watched, so overall/time_watched ~= times
+ ratio_est = (
+ overall_timewatched / time_watched if time_watched > 0 else float(state.timesWatched)
+ )
+ ratio_component = max((ratio_est - 1.0) * 100.0, 0.0)
+ else:
+ ratio_component = max((float(state.timesWatched) - 1.0) * 20.0, 0.0)
+ except Exception:
+ ratio_component = 0.0
+
+ # Combine components but clamp to reasonable bounds
+ combined = max(times_component, ratio_component)
+ rewatch_score = min(combined, 100.0)
+
# 3. Recency Score
recency_score = 0.0
is_recent = False
@@ -92,7 +139,7 @@ def _calculate_score_components(self, item: StremioLibraryItem) -> dict:
days_since = (now - last_watched).days
if days_since < 7:
- recency_score = 150
+ recency_score = 200
is_recent = True
elif days_since < 30:
recency_score = 100
@@ -100,9 +147,9 @@ def _calculate_score_components(self, item: StremioLibraryItem) -> dict:
elif days_since < 90:
recency_score = 70
elif days_since < 180:
- recency_score = 40
+ recency_score = 30
elif days_since < 365:
- recency_score = 20
+ recency_score = 10
# 4. Explicit Rating Score
rating_score = 0.0
diff --git a/app/services/stremio_service.py b/app/services/stremio_service.py
index 904d8d3..1bee937 100644
--- a/app/services/stremio_service.py
+++ b/app/services/stremio_service.py
@@ -1,4 +1,5 @@
import asyncio
+import random
from urllib.parse import urlparse
import httpx
@@ -14,11 +15,16 @@
def match_hostname(url: str, hostname: str) -> bool:
+ """Return True if the URL host matches the target host (scheme-agnostic).
+
+    Accepts `hostname` as either a naked host (example.com) or a full URL (https://example.com).
"""
- Checks if the hostname extracted from a URL matches a given hostname string.
- """
- parsed_url = urlparse(url)
- return parsed_url.hostname == hostname
+ try:
+ url_host = urlparse(url if "://" in url else f"https://{url}").hostname
+ target_host = urlparse(hostname if "://" in hostname else f"https://{hostname}").hostname
+ return bool(url_host and target_host and url_host.lower() == target_host.lower())
+ except Exception:
+ return False
class StremioService:
@@ -44,8 +50,13 @@ async def _get_client(self) -> httpx.AsyncClient:
"""Get or create the main Stremio API client."""
if self._client is None:
self._client = httpx.AsyncClient(
- timeout=30.0,
+ timeout=10.0,
limits=httpx.Limits(max_keepalive_connections=10, max_connections=50),
+ http2=True,
+ headers={
+ "User-Agent": "Watchly/Client",
+ "Accept": "application/json",
+ },
)
return self._client
@@ -53,8 +64,13 @@ async def _get_likes_client(self) -> httpx.AsyncClient:
"""Get or create the likes API client."""
if self._likes_client is None:
self._likes_client = httpx.AsyncClient(
- timeout=30.0,
+ timeout=10.0,
limits=httpx.Limits(max_keepalive_connections=10, max_connections=50),
+ http2=True,
+ headers={
+ "User-Agent": "Watchly/Client",
+ "Accept": "application/json",
+ },
)
return self._likes_client
@@ -81,9 +97,8 @@ async def _login_for_auth_key(self) -> str:
try:
client = await self._get_client()
- result = await client.post(url, json=payload)
- result.raise_for_status()
- data = result.json()
+ result = await self._post_with_retries(client, url, json=payload)
+ data = result
auth_key = data.get("result", {}).get("authKey", "")
if auth_key:
logger.info("Successfully authenticated with Stremio")
@@ -124,14 +139,8 @@ async def is_loved(self, auth_key: str, imdb_id: str, media_type: str) -> tuple[
try:
client = await self._get_likes_client()
- result = await client.get(url, params=params)
- result.raise_for_status()
- status = result.json().get("status", "")
- # Stremio returns "loved" for loved items
- # We assume there might be a "liked" status or we can infer based on user input
- # For now, the API mainly returns 'loved' or nothing.
- # If the user mentioned a specific "liked" signal, it might be a different value or endpoint.
- # Assuming "liked" is a valid return value for now based on user query.
+ result = await self._get_with_retries(client, url, params=params)
+ status = result.get("status", "")
return (status == "loved", status == "liked")
except Exception as e:
logger.error(
@@ -141,22 +150,28 @@ async def is_loved(self, auth_key: str, imdb_id: str, media_type: str) -> tuple[
return False, False
@alru_cache(maxsize=1000, ttl=3600)
- async def get_loved_items(self, auth_token: str, media_type: str) -> list[dict]:
- async with httpx.AsyncClient() as client:
- url = f"https://likes.stremio.com/addons/loved/movies-shows/{auth_token}/catalog/{media_type}/stremio-loved-{media_type.lower()}.json" # noqa: E501
- response = await client.get(url)
- response.raise_for_status()
- metas = response.json().get("metas", [])
+ async def get_loved_items(self, auth_token: str, media_type: str) -> list[str]:
+ url = f"https://likes.stremio.com/addons/loved/movies-shows/{auth_token}/catalog/{media_type}/stremio-loved-{media_type.lower()}.json" # noqa
+ try:
+ client = await self._get_likes_client()
+ data = await self._get_with_retries(client, url)
+ metas = data.get("metas", [])
return [meta.get("id") for meta in metas]
+ except Exception as e:
+ logger.warning(f"Failed to fetch loved items: {e}")
+ return []
@alru_cache(maxsize=1000, ttl=3600)
- async def get_liked_items(self, auth_token: str, media_type: str) -> list[dict]:
- async with httpx.AsyncClient() as client:
- url = f"https://likes.stremio.com/addons/liked/movies-shows/{auth_token}/catalog/{media_type}/stremio-liked-{media_type.lower()}.json" # noqa: E501
- response = await client.get(url)
- response.raise_for_status()
- metas = response.json().get("metas", [])
+ async def get_liked_items(self, auth_token: str, media_type: str) -> list[str]:
+ url = f"https://likes.stremio.com/addons/liked/movies-shows/{auth_token}/catalog/{media_type}/stremio-liked-{media_type.lower()}.json" # noqa
+ try:
+ client = await self._get_likes_client()
+ data = await self._get_with_retries(client, url)
+ metas = data.get("metas", [])
return [meta.get("id") for meta in metas]
+ except Exception as e:
+ logger.warning(f"Failed to fetch liked items: {e}")
+ return []
async def get_user_info(self) -> dict[str, str]:
"""Fetch user ID and email using the auth key."""
@@ -171,9 +186,7 @@ async def get_user_info(self) -> dict[str, str]:
try:
client = await self._get_client()
- result = await client.post(url, json=payload)
- result.raise_for_status()
- data = result.json()
+ data = await self._post_with_retries(client, url, json=payload)
if "error" in data:
error_msg = data["error"]
@@ -224,77 +237,94 @@ async def get_library_items(self) -> dict[str, list[dict]]:
}
client = await self._get_client()
- result = await client.post(url, json=payload)
- result.raise_for_status()
- items = result.json().get("result", [])
+ data = await self._post_with_retries(client, url, json=payload)
+ items = data.get("result", [])
logger.info(f"Fetched {len(items)} library items from Stremio")
- # Filter only items that user has watched
- watched_items = [
- item
- for item in items
- if (
- item.get("state", {}).get("timesWatched", 0) > 0
- and item.get("type") in ["movie", "series"]
- and item.get("_id").startswith("tt")
- )
- ]
+ # Filter items considered watched: explicit timesWatched/flaggedWatched OR high completion ratio
+ watched_items = []
+ for item in items:
+ if item.get("type") not in ["movie", "series"]:
+ continue
+ item_id = item.get("_id", "")
+ if not item_id.startswith("tt"):
+ continue
+ state = item.get("state", {}) or {}
+ times_watched = int(state.get("timesWatched") or 0)
+ flagged_watched = int(state.get("flaggedWatched") or 0)
+ duration = int(state.get("duration") or 0)
+ time_watched = int(state.get("timeWatched") or 0)
+ ratio_ok = duration > 0 and (time_watched / duration) >= 0.7
+ if times_watched > 0 or flagged_watched > 0 or ratio_ok:
+ watched_items.append(item)
logger.info(f"Filtered {len(watched_items)} watched library items")
- # Sort watched items by watched time (most recent first)
- watched_items.sort(key=lambda x: x.get("state", {}).get("lastWatched", ""), reverse=True)
+            # Sort watched items by lastWatched, falling back to _mtime (most recent first)
+ def _sort_key(x: dict):
+ state = x.get("state", {}) or {}
+ return (
+ str(state.get("lastWatched") or ""),
+ str(x.get("_mtime") or ""),
+ )
+
+ watched_items.sort(key=_sort_key, reverse=True)
- # is_loved only until we find 10 movies and 10 series
loved_items = []
- movies_found = 0
- series_found = 0
- target_count = settings.RECOMMENDATION_SOURCE_ITEMS_LIMIT
- batch_size = 20
-
- # Process in batches to stop early
- for i in range(0, len(watched_items), batch_size):
- if movies_found >= target_count and series_found >= target_count:
- logger.info("Found enough loved items, stopping check")
- break
-
- batch = watched_items[i : i + batch_size] # noqa: E203
-
- # Filter batch to only check types we still need
- check_candidates = []
- for item in batch:
- itype = item.get("type")
- if itype == "movie" and movies_found < target_count:
- check_candidates.append(item)
- elif itype == "series" and series_found < target_count:
- check_candidates.append(item)
-
- if not check_candidates:
- continue
+ added_items = []
+ removed_items = []
- # Check loved status for candidates in parallel
- loved_statuses = await asyncio.gather(
- *[self.is_loved(auth_key, item.get("_id"), item.get("type")) for item in check_candidates]
- )
+            # Fetch loved and liked item IDs in parallel
- # Process results
- for item, (is_loved_status, is_liked_status) in zip(check_candidates, loved_statuses):
- if is_loved_status or is_liked_status:
- # Store status on item for scoring later
- item["_is_loved"] = is_loved_status
- item["_is_liked"] = is_liked_status
-
- loved_items.append(item)
- if item.get("type") == "movie":
- movies_found += 1
- elif item.get("type") == "series":
- series_found += 1
-
- logger.info(
- f"Found {len(loved_items)} loved library items (Movies: {movies_found}, Series: {series_found})"
+ loved_movies, loved_series, liked_movies, liked_series = await asyncio.gather(
+ self.get_loved_items(auth_key, "movie"),
+ self.get_loved_items(auth_key, "series"),
+ self.get_liked_items(auth_key, "movie"),
+ self.get_liked_items(auth_key, "series"),
)
+ watched_ids = {i.get("_id") for i in watched_items}
+
+ for item in watched_items:
+ loved = False
+ if item.get("_id") in loved_movies or item.get("_id") in loved_series:
+ item["_is_loved"] = True
+ loved = True
+ if item.get("_id") in liked_movies or item.get("_id") in liked_series:
+ item["_is_liked"] = True
+ loved = True
+
+ if loved:
+ loved_items.append(item)
+
+ logger.info(f"Found {len(loved_items)} loved library items")
+
+            # Build added-only items: in library, movie/series type, IMDb ID, not watched, not loved/liked
+ for item in items:
+ if item.get("type") not in ["movie", "series"]:
+ continue
+ iid = item.get("_id", "")
+ if not iid.startswith("tt"):
+ continue
+ if iid in watched_ids:
+ continue
+ if iid in loved_movies or iid in loved_series or iid in liked_movies or iid in liked_series:
+ continue
+ if item.get("temp"):
+ continue
+ if item.get("removed"):
+ removed_items.append(item)
+ continue
+
+ added_items.append(item)
+
+ logger.info(f"Found {len(added_items)} added (unwatched) and {len(removed_items)} removed library items")
# Return raw items; ScoringService will handle Pydantic conversion
- return {"watched": watched_items, "loved": loved_items}
+ return {
+ "watched": watched_items,
+ "loved": loved_items,
+ "added": added_items,
+ "removed": removed_items,
+ }
except Exception as e:
logger.error(f"Error fetching library items: {e}", exc_info=True)
return {"watched": [], "loved": []}
@@ -308,9 +338,7 @@ async def get_addons(self, auth_key: str | None = None) -> list[dict]:
"update": True,
}
client = await self._get_client()
- result = await client.post(url, json=payload)
- result.raise_for_status()
- data = result.json()
+ data = await self._post_with_retries(client, url, json=payload)
error_payload = data.get("error")
if not error_payload and (data.get("code") and data.get("message")):
error_payload = data
@@ -337,10 +365,9 @@ async def update_addon(self, addons: list[dict], auth_key: str | None = None):
}
client = await self._get_client()
- result = await client.post(url, json=payload)
- result.raise_for_status()
+ data = await self._post_with_retries(client, url, json=payload)
logger.info("Updated addons")
- return result.json().get("result", {}).get("success", False)
+ return data.get("result", {}).get("success", False)
async def update_catalogs(self, catalogs: list[dict], auth_key: str | None = None):
auth_key = auth_key or await self.get_auth_key()
@@ -366,3 +393,69 @@ async def is_addon_installed(self, auth_key: str | None = None):
):
return True
return False
+
+ async def _post_with_retries(self, client: httpx.AsyncClient, url: str, json: dict, max_tries: int = 3) -> dict:
+ attempts = 0
+ last_exc: Exception | None = None
+ while attempts < max_tries:
+ try:
+ resp = await client.post(url, json=json)
+ resp.raise_for_status()
+ return resp.json()
+ except httpx.HTTPStatusError as e:
+ status = e.response.status_code
+ if status == 429 or 500 <= status < 600:
+ attempts += 1
+ backoff = (2 ** (attempts - 1)) + random.uniform(0, 0.25)
+ logger.warning(
+ f"Stremio POST {url} failed with {status}; retry {attempts}/{max_tries} in" f" {backoff:.2f}s"
+ )
+ await asyncio.sleep(backoff)
+ last_exc = e
+ continue
+ raise
+ except httpx.RequestError as e:
+ attempts += 1
+ backoff = (2 ** (attempts - 1)) + random.uniform(0, 0.25)
+ logger.warning(
+ f"Stremio POST {url} request error: {e}; retry {attempts}/{max_tries} in {backoff:.2f}s"
+ )
+ await asyncio.sleep(backoff)
+ last_exc = e
+ continue
+ if last_exc:
+ raise last_exc
+ return {}
+
+ async def _get_with_retries(
+ self, client: httpx.AsyncClient, url: str, params: dict | None = None, max_tries: int = 3
+ ) -> dict:
+ attempts = 0
+ last_exc: Exception | None = None
+ while attempts < max_tries:
+ try:
+ resp = await client.get(url, params=params)
+ resp.raise_for_status()
+ return resp.json()
+ except httpx.HTTPStatusError as e:
+ status = e.response.status_code
+ if status == 429 or 500 <= status < 600:
+ attempts += 1
+ backoff = (2 ** (attempts - 1)) + random.uniform(0, 0.25)
+ logger.warning(
+ f"Stremio GET {url} failed with {status}; retry {attempts}/{max_tries} in" f" {backoff:.2f}s"
+ )
+ await asyncio.sleep(backoff)
+ last_exc = e
+ continue
+ raise
+ except httpx.RequestError as e:
+ attempts += 1
+ backoff = (2 ** (attempts - 1)) + random.uniform(0, 0.25)
+ logger.warning(f"Stremio GET {url} request error: {e}; retry {attempts}/{max_tries} in {backoff:.2f}s")
+ await asyncio.sleep(backoff)
+ last_exc = e
+ continue
+ if last_exc:
+ raise last_exc
+ return {}
diff --git a/app/services/tmdb_service.py b/app/services/tmdb_service.py
index c8a7a20..1ea9a5c 100644
--- a/app/services/tmdb_service.py
+++ b/app/services/tmdb_service.py
@@ -1,8 +1,12 @@
+import asyncio
+import random
+
import httpx
from async_lru import alru_cache
from loguru import logger
from app.core.config import settings
+from app.core.version import __version__
class TMDBService:
@@ -23,6 +27,11 @@ async def _get_client(self) -> httpx.AsyncClient:
self._client = httpx.AsyncClient(
timeout=10.0,
limits=httpx.Limits(max_keepalive_connections=20, max_connections=100),
+ http2=True,
+ headers={
+ "User-Agent": f"Watchly/{__version__} (+https://github.com/TimilsinaBimal/Watchly)",
+ "Accept": "application/json",
+ },
)
return self._client
@@ -42,27 +51,49 @@ async def _make_request(self, endpoint: str, params: dict | None = None) -> dict
if params:
default_params.update(params)
- try:
- client = await self._get_client()
- response = await client.get(url, params=default_params)
- response.raise_for_status()
-
- # Check if response has content
- if not response.text:
- logger.warning(f"TMDB API returned empty response for {endpoint}")
- return {}
-
+ attempts = 0
+ last_exc: Exception | None = None
+ while attempts < 3:
try:
- return response.json()
- except ValueError as e:
- logger.error(f"TMDB API returned invalid JSON for {endpoint}: {e}. Response: {response.text[:200]}")
- return {}
- except httpx.HTTPStatusError as e:
- logger.error(f"TMDB API error for {endpoint}: {e.response.status_code} - {e.response.text[:200]}")
- raise
- except httpx.RequestError as e:
- logger.error(f"TMDB API request error for {endpoint}: {e}")
- raise
+ client = await self._get_client()
+ response = await client.get(url, params=default_params)
+ response.raise_for_status()
+
+ if not response.text:
+ logger.warning(f"TMDB API returned empty response for {endpoint}")
+ return {}
+
+ try:
+ return response.json()
+ except ValueError as e:
+ logger.error(
+ f"TMDB API returned invalid JSON for {endpoint}: {e}. Response: {response.text[:200]}"
+ )
+ return {}
+ except httpx.HTTPStatusError as e:
+ status = e.response.status_code
+ # Retry on 429 or 5xx
+ if status == 429 or 500 <= status < 600:
+ attempts += 1
+ backoff = (2 ** (attempts - 1)) + random.uniform(0, 0.25)
+ logger.warning(f"TMDB {endpoint} failed with {status}; retry {attempts}/3 in {backoff:.2f}s")
+ await asyncio.sleep(backoff)
+ last_exc = e
+ continue
+ logger.error(f"TMDB API error for {endpoint}: {status} - {e.response.text[:200]}")
+ raise
+ except httpx.RequestError as e:
+ attempts += 1
+ backoff = (2 ** (attempts - 1)) + random.uniform(0, 0.25)
+ logger.warning(f"TMDB request error for {endpoint}: {e}; retry {attempts}/3 in {backoff:.2f}s")
+ await asyncio.sleep(backoff)
+ last_exc = e
+ continue
+
+ # Exhausted retries
+ if last_exc:
+ raise last_exc
+ return {}
@alru_cache(maxsize=2000)
async def find_by_imdb_id(self, imdb_id: str) -> tuple[int | None, str | None]:
diff --git a/app/services/token_store.py b/app/services/token_store.py
index b885e7e..304a6be 100644
--- a/app/services/token_store.py
+++ b/app/services/token_store.py
@@ -62,7 +62,14 @@ def decrypt_token(self, enc: str) -> str:
async def _get_client(self) -> redis.Redis:
if self._client is None:
- self._client = redis.from_url(settings.REDIS_URL, decode_responses=True, encoding="utf-8")
+ # Add socket timeouts to avoid hanging on Redis operations
+ self._client = redis.from_url(
+ settings.REDIS_URL,
+ decode_responses=True,
+ encoding="utf-8",
+ socket_connect_timeout=5,
+ socket_timeout=5,
+ )
return self._client
def _format_key(self, token: str) -> str:
diff --git a/app/services/user_profile.py b/app/services/user_profile.py
index 07a3822..2c0cbba 100644
--- a/app/services/user_profile.py
+++ b/app/services/user_profile.py
@@ -1,3 +1,4 @@
+import asyncio
from collections import defaultdict
from app.models.profile import UserTasteProfile
@@ -57,28 +58,38 @@ async def build_user_profile(
"countries": defaultdict(float),
}
- for item in scored_items:
+ async def _process(item):
# Filter by content type if specified
if content_type and item.item.type != content_type:
- continue
+ return None
# Resolve ID
tmdb_id = await self._resolve_tmdb_id(item.item.id)
if not tmdb_id:
- continue
+ return None
# Fetch full details including keywords and credits
meta = await self._fetch_full_metadata(tmdb_id, item.item.type)
if not meta:
- continue
+ return None
# Vectorize this single item
item_vector = self._vectorize_item(meta)
- # Weighted Aggregation
# Scale by Interest Score (0.0 - 1.0)
interest_weight = item.score / 100.0
+ return item_vector, interest_weight
+
+ # Launch all item processing coroutines in parallel
+ tasks = [_process(item) for item in scored_items]
+ results = await asyncio.gather(*tasks)
+
+ # Merge results sequentially to avoid interleaved writes
+ for res in results:
+ if res is None:
+ continue
+ item_vector, interest_weight = res
self._merge_vector(profile_data, item_vector, interest_weight, excluded_genres)
# Convert to Pydantic Model
@@ -156,6 +167,25 @@ def calculate_similarity(self, profile: UserTasteProfile, item_meta: dict) -> fl
s = safe_div(s, len(item_vec["countries"]))
score += s * COUNTRIES_WEIGHT
+ # 6. YEAR/DECADE
+ # Reward matches on the user's preferred decades, with soft credit to adjacent decades.
+ item_year = item_vec.get("year")
+ if item_year is not None:
+ base_pref = profile.years.values.get(item_year, 0.0)
+ if base_pref > 0:
+ score += emphasis(base_pref) * YEAR_WEIGHT
+ else:
+ # Soft-match adjacent decades at half strength
+ prev_decade = item_year - 10
+ next_decade = item_year + 10
+ neighbor_pref = 0.0
+ if prev_decade in profile.years.values:
+ neighbor_pref = max(neighbor_pref, profile.years.values.get(prev_decade, 0.0))
+ if next_decade in profile.years.values:
+ neighbor_pref = max(neighbor_pref, profile.years.values.get(next_decade, 0.0))
+ if neighbor_pref > 0:
+ score += emphasis(neighbor_pref) * (YEAR_WEIGHT * 0.5)
+
return score
def _vectorize_item(self, meta: dict) -> dict[str, list[int] | int | list[str] | None]:
diff --git a/app/startup/migration.py b/app/startup/migration.py
index e167c89..432df02 100644
--- a/app/startup/migration.py
+++ b/app/startup/migration.py
@@ -35,7 +35,7 @@ async def get_auth_key(username: str, password: str):
"type": "Login",
"facebook": False,
}
- async with httpx.AsyncClient() as client:
+ async with httpx.AsyncClient(timeout=10.0) as client:
result = await client.post(url, json=payload)
result.raise_for_status()
data = result.json()
@@ -49,7 +49,7 @@ async def get_user_info(auth_key):
"type": "GetUser",
"authKey": auth_key,
}
- async with httpx.AsyncClient() as client:
+ async with httpx.AsyncClient(timeout=10.0) as client:
response = await client.post(url, json=payload)
response.raise_for_status()
data = response.json()
@@ -66,7 +66,7 @@ async def get_addons(auth_key: str):
"authKey": auth_key,
"update": True,
}
- async with httpx.AsyncClient() as client:
+ async with httpx.AsyncClient(timeout=10.0) as client:
result = await client.post(url, json=payload)
result.raise_for_status()
data = result.json()
@@ -101,7 +101,7 @@ async def update_addon_url(auth_key: str, user_id: str):
"addons": addons,
}
- async with httpx.AsyncClient() as client:
+ async with httpx.AsyncClient(timeout=10.0) as client:
result = await client.post(url, json=payload)
result.raise_for_status()
logger.info("Updated addon url")
@@ -117,7 +117,7 @@ async def decode_old_payloads(encrypted_raw: str):
return payload
-async def encrypt_auth_key(auth_key):
+def encrypt_auth_key(auth_key):
salt = b"x7FDf9kypzQ1LmR32b8hWv49sKq2Pd8T"
kdf = PBKDF2HMAC(
algorithm=hashes.SHA256(),
@@ -198,7 +198,7 @@ async def process_migration_key(redis_client: redis.Redis, key: str) -> bool:
new_payload = prepare_default_payload(email, user_id)
if auth_key:
- new_payload["authKey"] = await encrypt_auth_key(auth_key)
+ new_payload["authKey"] = encrypt_auth_key(auth_key)
new_key = user_id.strip()
payload_json = json.dumps(new_payload)
diff --git a/app/static/index.html b/app/static/index.html
index 91a58f2..96b9773 100644
--- a/app/static/index.html
+++ b/app/static/index.html
@@ -71,6 +71,17 @@
.nav-item.disabled {
@apply opacity-50 cursor-not-allowed pointer-events-none;
}
+
+ /* Announcement link styling to ensure visibility */
+ #announcement-content a {
+ color: #60a5fa; /* blue-400 */
+ text-decoration: underline;
+ }
+
+ #announcement-content a:hover {
+ color: #ffffff;
+ text-decoration: underline;
+ }
@@ -231,6 +242,18 @@
Discover movies and series tailored to your unique taste, powered by your Stremio library and watch history.
+
+
diff --git a/app/static/script.js b/app/static/script.js
index 66616dc..c4066c9 100644
--- a/app/static/script.js
+++ b/app/static/script.js
@@ -68,6 +68,7 @@ document.addEventListener('DOMContentLoaded', () => {
initializeStremioLogin();
initializeFooter();
initializeKofi();
+ initializeAnnouncement();
// Next Buttons
if (configNextBtn) configNextBtn.addEventListener('click', () => switchSection('catalogs'));
@@ -910,3 +911,31 @@ function initializeKofi() {
}
});
}
+
+// Announcement: fetch a small message or HTML snippet from the API and render it in the home hero
+async function initializeAnnouncement() {
+ const container = document.getElementById('announcement');
+ const content = document.getElementById('announcement-content');
+ if (!container || !content) return;
+
+ try {
+ const res = await fetch('/announcement');
+ if (!res.ok) return;
+
+        // Read the body once as text, then try to parse it as JSON
+        // (reading res.text() after res.json() fails: a response body can only be consumed once).
+        const raw = await res.text();
+        let html = '';
+        try {
+            const data = JSON.parse(raw);
+            html = data.html || data.message || '';
+        } catch (e) { html = raw; }
+
+ if (!html) return;
+
+ content.innerHTML = html;
+ container.classList.remove('hidden');
+ } catch (e) {
+        // Fail silently; the announcement is optional
+ }
+}
diff --git a/pyproject.toml b/pyproject.toml
index 33e904c..2632128 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -12,7 +12,7 @@ dependencies = [
"deep-translator>=1.11.4",
"fastapi>=0.104.1",
"google-genai>=1.54.0",
- "httpx>=0.25.2",
+ "httpx[http2]>=0.25.2",
"loguru>=0.7.2",
"pydantic>=2.5.0",
"pydantic-settings>=2.1.0",
diff --git a/uv.lock b/uv.lock
index 79a7e75..3397e59 100644
--- a/uv.lock
+++ b/uv.lock
@@ -523,6 +523,28 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" },
]
+[[package]]
+name = "h2"
+version = "4.3.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "hpack" },
+ { name = "hyperframe" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/1d/17/afa56379f94ad0fe8defd37d6eb3f89a25404ffc71d4d848893d270325fc/h2-4.3.0.tar.gz", hash = "sha256:6c59efe4323fa18b47a632221a1888bd7fde6249819beda254aeca909f221bf1", size = 2152026, upload-time = "2025-08-23T18:12:19.778Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/69/b2/119f6e6dcbd96f9069ce9a2665e0146588dc9f88f29549711853645e736a/h2-4.3.0-py3-none-any.whl", hash = "sha256:c438f029a25f7945c69e0ccf0fb951dc3f73a5f6412981daee861431b70e2bdd", size = 61779, upload-time = "2025-08-23T18:12:17.779Z" },
+]
+
+[[package]]
+name = "hpack"
+version = "4.1.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/2c/48/71de9ed269fdae9c8057e5a4c0aa7402e8bb16f2c6e90b3aa53327b113f8/hpack-4.1.0.tar.gz", hash = "sha256:ec5eca154f7056aa06f196a557655c5b009b382873ac8d1e66e79e87535f1dca", size = 51276, upload-time = "2025-01-22T21:44:58.347Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/07/c6/80c95b1b2b94682a72cbdbfb85b81ae2daffa4291fbfa1b1464502ede10d/hpack-4.1.0-py3-none-any.whl", hash = "sha256:157ac792668d995c657d93111f46b4535ed114f0c9c8d672271bbec7eae1b496", size = 34357, upload-time = "2025-01-22T21:44:56.92Z" },
+]
+
[[package]]
name = "httpcore"
version = "1.0.9"
@@ -594,6 +616,20 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" },
]
+[package.optional-dependencies]
+http2 = [
+ { name = "h2" },
+]
+
+[[package]]
+name = "hyperframe"
+version = "6.1.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/02/e7/94f8232d4a74cc99514c13a9f995811485a6903d48e5d952771ef6322e30/hyperframe-6.1.0.tar.gz", hash = "sha256:f630908a00854a7adeabd6382b43923a4c4cd4b821fcb527e6ab9e15382a3b08", size = 26566, upload-time = "2025-01-22T21:41:49.302Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/48/30/47d0bf6072f7252e6521f3447ccfa40b421b6824517f82854703d0f5a98b/hyperframe-6.1.0-py3-none-any.whl", hash = "sha256:b03380493a519fce58ea5af42e4a42317bf9bd425596f7a0835ffce80f1a42e5", size = 13007, upload-time = "2025-01-22T21:41:47.295Z" },
+]
+
[[package]]
name = "identify"
version = "2.6.15"
@@ -1349,7 +1385,7 @@ dependencies = [
{ name = "deep-translator" },
{ name = "fastapi" },
{ name = "google-genai" },
- { name = "httpx" },
+ { name = "httpx", extra = ["http2"] },
{ name = "loguru" },
{ name = "pydantic" },
{ name = "pydantic-settings" },
@@ -1374,7 +1410,7 @@ requires-dist = [
{ name = "deep-translator", specifier = ">=1.11.4" },
{ name = "fastapi", specifier = ">=0.104.1" },
{ name = "google-genai", specifier = ">=1.54.0" },
- { name = "httpx", specifier = ">=0.25.2" },
+ { name = "httpx", extras = ["http2"], specifier = ">=0.25.2" },
{ name = "loguru", specifier = ">=0.7.2" },
{ name = "pydantic", specifier = ">=2.5.0" },
{ name = "pydantic-settings", specifier = ">=2.1.0" },