diff --git a/CLAUDE.md b/CLAUDE.md index b55f01fb..b564bd8c 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -34,6 +34,7 @@ export default ({ title }) => { /* ... */ }; - **Use strict typing** - avoid `any` type - **Define return types** for complex functions - **Use union types** for state enums and options +- **Run type checking**: Use `bun run check` for comprehensive TypeScript validation ```tsx // ✅ Good: Strict typing @@ -547,6 +548,53 @@ function ErrorBoundary({ children }: { children: React.ReactNode }) { } ``` +## Icons and Assets + +### Lucide React Icons + +Use Lucide React for all icons throughout the application: + +```tsx +// ✅ Good: Use Lucide React icons +import { Home, Settings, User, ChevronDown, X } from 'lucide-react'; + +function NavigationMenu() { + return ( + + ); +} + +// ✅ Good: Icon sizing with consistent classes + + +// ✅ Good: Icons in alerts +
+  <CheckCircle className="h-5 w-5" />
+  <span>Success message here</span>
+</div>
+``` + +**Icon Guidelines:** +- **Consistent sizing**: Use `h-4 w-4` for small icons, `h-5 w-5` for medium, `h-6 w-6` for larger +- **Accessibility**: Add `aria-hidden="true"` for decorative icons +- **Semantic naming**: Import icons with descriptive names that match their usage +- **Performance**: Only import the specific icons you need + ## Project-Specific Conventions ### API Integration Patterns @@ -649,11 +697,14 @@ function App() { ## Development Workflow ### Before Committing -1. **Run code quality checks**: `bun run check` (linting, formatting, and code quality) +1. **Run type checking**: `bun run check` (TypeScript validation, linting, formatting, and code quality) 2. **Test build**: `bun run build` (TypeScript compilation + Vite build) 3. **Review changes**: Ensure code follows these standards -Note: Use `bun run lint` for linting-only checks when needed. +**Command Reference:** +- `bun run check` - Comprehensive validation (TypeScript + linting + formatting) +- `bun run lint` - Linting-only checks when needed +- `bun run build` - Production build validation ### Code Review Checklist - [ ] Components use TypeScript interfaces diff --git a/cmd/altmount/cmd/serve.go b/cmd/altmount/cmd/serve.go index cdfd8929..6c46da28 100644 --- a/cmd/altmount/cmd/serve.go +++ b/cmd/altmount/cmd/serve.go @@ -2,7 +2,7 @@ package cmd import ( "context" - "encoding/json" + "fmt" "log/slog" "net/http" "os" @@ -11,6 +11,10 @@ import ( "time" "github.com/go-pkgz/auth/v2/token" + "github.com/gofiber/fiber/v2" + "github.com/gofiber/fiber/v2/middleware/adaptor" + "github.com/gofiber/fiber/v2/middleware/filesystem" + fLogger "github.com/gofiber/fiber/v2/middleware/logger" "github.com/javi11/altmount/frontend" "github.com/javi11/altmount/internal/api" "github.com/javi11/altmount/internal/arrs" @@ -68,7 +72,7 @@ func runServe(cmd *cobra.Command, args []string) error { } // Setup log rotation with the loaded configuration - logger := slogutil.SetupLogRotationWithFallback(cfg.Log, cfg.LogLevel) + logger := slogutil.SetupLogRotationWithFallback(cfg.Log, cfg.Log.Level) slog.SetDefault(logger) logger.Info("Directory validation successful", @@ -78,7 +82,7 @@ func runServe(cmd *cobra.Command, args []string) error { logger.Info("Starting AltMount server with log rotation configured", "log_file", cfg.Log.File, - "log_level", getEffectiveLogLevel(cfg.Log.Level, cfg.LogLevel), + "log_level", getEffectiveLogLevel(cfg.Log.Level, cfg.Log.Level), "max_size_mb", cfg.Log.MaxSize, "max_age_days", cfg.Log.MaxAge, "max_backups", cfg.Log.MaxBackups, @@ -174,8 +178,37 @@ func runServe(cmd *cobra.Command, args []string) error { _ = nsys.Close() }() - // Create shared HTTP mux - mux := http.NewServeMux() + // Create Fiber app + app := fiber.New(fiber.Config{ + RequestMethods: append( + fiber.DefaultMethods, "PROPFIND", "PROPPATCH", "MKCOL", "COPY", "MOVE", "LOCK", "UNLOCK", + ), + ErrorHandler: func(c *fiber.Ctx, err error) error { + code := fiber.StatusInternalServerError + if e, ok := err.(*fiber.Error); ok { + code = e.Code + } + logger.Error("Fiber error", "path", c.Path(), "method", c.Method(), "error", err) + return c.Status(code).JSON(fiber.Map{ + "error": err.Error(), + }) + }, + }) + + // Conditional Fiber request logging - only in debug mode + // We use a wrapper to allow dynamic enabling/disabling + var debugMode bool + effectiveLogLevel := getEffectiveLogLevel(cfg.Log.Level, cfg.Log.Level) + debugMode = effectiveLogLevel == "debug" + + // Create the logger middleware but wrap it to check debug mode + fiberLogger := 
fLogger.New() + app.Use(func(c *fiber.Ctx) error { + if debugMode { + return fiberLogger(c) + } + return c.Next() + }) // Declare auth services at function scope so WebDAV can access them var authService *auth.Service @@ -219,7 +252,7 @@ func runServe(cmd *cobra.Command, args []string) error { Prefix: "/api", } - // Create API server with shared mux + // Create API server (now using Fiber directly) apiServer := api.NewServer( apiConfig, mainRepo, @@ -230,14 +263,15 @@ func runServe(cmd *cobra.Command, args []string) error { configManager, nsys.MetadataReader(), poolManager, - mux, nsys.ImporterService(), arrsService) - logger.Info("API server enabled", "prefix", "/api") + + apiServer.SetupRoutes(app) + logger.Info("API server enabled with Fiber routes", "prefix", "/api") // Register API server for auth updates - // Create WebDAV server with shared mux + // Create WebDAV handler for Fiber integration var tokenService *token.Service var webdavUserRepo *database.UserRepository @@ -247,27 +281,27 @@ func runServe(cmd *cobra.Command, args []string) error { webdavUserRepo = userRepo } - server, err := webdav.NewServer(&webdav.Config{ + webdavHandler, err := webdav.NewHandler(&webdav.Config{ Port: cfg.WebDAV.Port, User: cfg.WebDAV.User, Pass: cfg.WebDAV.Password, - Debug: cfg.LogLevel == "debug", + Debug: cfg.Log.Level == "debug", Prefix: "/webdav", - }, nsys.FileSystem(), mux, tokenService, webdavUserRepo, configManager.GetConfigGetter()) + }, nsys.FileSystem(), tokenService, webdavUserRepo, configManager.GetConfigGetter()) if err != nil { - logger.Error("failed to start webdav", "err", err) + logger.Error("failed to create webdav handler", "err", err) return err } // Register WebDAV auth updater with dynamic credentials webdavAuthUpdater := webdav.NewAuthUpdater() - webdavAuthUpdater.SetAuthCredentials(server.GetAuthCredentials()) + webdavAuthUpdater.SetAuthCredentials(webdavHandler.GetAuthCredentials()) // Add WebDAV-specific config change handler configManager.OnConfigChange(func(oldConfig, newConfig *config.Config) { // Sync WebDAV auth credentials if they changed if oldConfig.WebDAV.User != newConfig.WebDAV.User || oldConfig.WebDAV.Password != newConfig.WebDAV.Password { - server.SyncAuthCredentials() + webdavHandler.SyncAuthCredentials() logger.Info("WebDAV auth credentials updated", "old_user", oldConfig.WebDAV.User, "new_user", newConfig.WebDAV.User) @@ -277,12 +311,12 @@ func runServe(cmd *cobra.Command, args []string) error { // Add log level config change handler configManager.OnConfigChange(func(oldConfig, newConfig *config.Config) { // Determine old and new log levels (prioritize Log.Level over LogLevel) - oldLevel := oldConfig.LogLevel + oldLevel := oldConfig.Log.Level if oldConfig.Log.Level != "" { oldLevel = oldConfig.Log.Level } - newLevel := newConfig.LogLevel + newLevel := newConfig.Log.Level if newConfig.Log.Level != "" { newLevel = newConfig.Log.Level } @@ -290,14 +324,16 @@ func runServe(cmd *cobra.Command, args []string) error { // Apply log level change if it changed if oldLevel != newLevel { api.ApplyLogLevel(newLevel) + // Update Fiber logger debug mode + debugMode = newLevel == "debug" logger.Info("Log level updated dynamically", "old_level", oldLevel, - "new_level", newLevel) + "new_level", newLevel, + "fiber_logging", debugMode) } }) - logger.Info("Starting AltMount server", - "webdav_port", cfg.WebDAV.Port, + logger.Info("Initializing AltMount server components", "providers", len(cfg.Providers), "download_workers", cfg.Streaming.MaxDownloadWorkers, 
"processor_workers", cfg.Import.MaxProcessorWorkers) @@ -351,24 +387,57 @@ func runServe(cmd *cobra.Command, args []string) error { logger.Info("Arrs service is disabled in configuration") } - // Add simple liveness endpoint for Docker health checks - mux.HandleFunc("/live", handleSimpleHealth) + // Add simple liveness endpoint for Docker health checks directly to Fiber + app.Get("/live", handleFiberHealth) + + // Use middleware that bypasses Fiber's method validation + app.All("/webdav*", adaptor.HTTPHandler(webdavHandler.GetHTTPHandler())) - mux.Handle("/", getStaticFileHandler()) + // Set up Fiber SPA routing + setupSPARoutes(app) + + logger.Info("Starting AltMount server with Fiber", + "port", cfg.WebDAV.Port, + "webdav_path", "/webdav", + "api_path", "/api", + "providers", len(cfg.Providers), + "download_workers", cfg.Streaming.MaxDownloadWorkers, + "processor_workers", cfg.Import.MaxProcessorWorkers) + + routes := app.GetRoutes() + for _, route := range routes { + logger.Debug("Fiber route", "path", route.Path, "method", route.Method) + } // Set up signal handling for graceful shutdown sigChan := make(chan os.Signal, 1) signal.Notify(sigChan, syscall.SIGINT, syscall.SIGTERM) - // Start server in goroutine + // Start Fiber server in goroutine + serverErr := make(chan error, 1) go func() { - if err := server.Start(ctx); err != nil { - slog.Error("WebDAV server error", "err", err) + if err := app.Listen(fmt.Sprintf(":%d", cfg.WebDAV.Port)); err != nil { + logger.Error("Fiber server error", "error", err) + serverErr <- err } }() + logger.Info("AltMount server started successfully") + // Wait for shutdown signal or server error - signalHandler(ctx) + select { + case sig := <-sigChan: + logger.Info("Received shutdown signal", "signal", sig.String()) + cancel() // Cancel context to signal all services to stop + case err := <-serverErr: + logger.Error("Server error, shutting down", "error", err) + cancel() + case <-ctx.Done(): + logger.Info("Context cancelled, shutting down") + } + + // Start graceful shutdown sequence + logger.Info("Starting graceful shutdown sequence") // Stop health worker if running if healthWorker != nil { @@ -384,106 +453,53 @@ func runServe(cmd *cobra.Command, args []string) error { logger.Info("Arrs service cleanup completed") } - server.Stop() + // Shutdown Fiber app with timeout + shutdownCtx, shutdownCancel := context.WithTimeout(context.Background(), 30*time.Second) + defer shutdownCancel() + + logger.Info("Shutting down Fiber server...") + if err := app.ShutdownWithContext(shutdownCtx); err != nil { + logger.Error("Error shutting down Fiber app", "error", err) + return err + } + logger.Info("Fiber server shutdown completed") - logger.Info("AltMount server shutting down gracefully") + logger.Info("AltMount server shutdown completed successfully") return nil } -// handleSimpleHealth provides a lightweight liveness check endpoint for Docker -func handleSimpleHealth(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - - response := map[string]interface{}{ +// handleFiberHealth provides a lightweight liveness check endpoint for Docker using Fiber +func handleFiberHealth(c *fiber.Ctx) error { + response := map[string]any{ "status": "ok", "timestamp": time.Now().UTC().Format(time.RFC3339), } - - json.NewEncoder(w).Encode(response) -} - -func signalHandler(ctx context.Context) { - c := make(chan os.Signal, 1) - // We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C) - // SIGKILL, SIGQUIT 
or SIGTERM (Ctrl+/) will not be caught. - signal.Notify(c, os.Interrupt) - - // Block until we receive our signal. - select { - case <-ctx.Done(): - case <-c: - } + return c.JSON(response) } -func getStaticFileHandler() http.Handler { - // Check if we should use embedded filesystem or development path - if _, err := os.Stat(frontendBuildPath); err == nil { - // Development mode - serve from disk with SPA fallback - return createSPAHandler(http.Dir(frontendBuildPath), false) +// setupSPARoutes configures Fiber SPA routing for the frontend +func setupSPARoutes(app *fiber.App) { + // Determine frontend build path + frontendPath := frontendBuildPath + if _, err := os.Stat(frontendBuildPath); err != nil { + // Development mode - serve from disk + frontendPath = "./frontend/dist" } - // Production mode - serve from embedded filesystem with SPA fallback + // Cli mode - use embedded filesystem buildFS, err := frontend.GetBuildFS() if err != nil { - slog.Info("Failed to get embedded filesystem", "error", err) - // Fallback to disk if embedded fails - return createSPAHandler(http.Dir(frontendBuildPath), false) - } - - return createSPAHandler(http.FS(buildFS), true) -} - -// createSPAHandler creates a handler that serves static files with SPA fallback -func createSPAHandler(fs http.FileSystem, isEmbedded bool) http.Handler { - fileServer := http.FileServer(fs) - - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Clean the path - path := r.URL.Path - if path == "/" { - path = "/index.html" - } - - // Try to open the requested file - var file http.File - var err error - - file, err = fs.Open(path) - - // If file exists, serve it normally - if err == nil { - if stat, err := file.Stat(); err == nil && !stat.IsDir() { - file.Close() - fileServer.ServeHTTP(w, r) - return - } - file.Close() - } - - // File doesn't exist or is a directory, check if it's a static asset request - // Static assets typically have file extensions - if hasFileExtension(path) { - // This looks like a static asset request that doesn't exist, return 404 - http.NotFound(w, r) - return - } - - // No file extension - assume it's a client-side route, serve index.html - r.URL.Path = "/index.html" - fileServer.ServeHTTP(w, r) - }) -} - -// hasFileExtension checks if the path appears to be requesting a static asset -func hasFileExtension(path string) bool { - // Common static asset extensions - staticExtensions := []string{".js", ".css", ".png", ".jpg", ".jpeg", ".gif", ".svg", ".ico", ".woff", ".woff2", ".ttf", ".eot", ".map", ".json", ".xml", ".txt"} - - for _, ext := range staticExtensions { - if len(path) >= len(ext) && path[len(path)-len(ext):] == ext { - return true - } + // Docker or development + app.Static("/", frontendPath) + app.Static("*", frontendPath+"/index.html") + } else { + // For embedded filesystem, we'll handle it differently below + app.All("/*", filesystem.New(filesystem.Config{ + Root: http.FS(buildFS), + NotFoundFile: "index.html", + Index: "index.html", + })) + + return } - return false } diff --git a/config.sample.yaml b/config.sample.yaml index 033daec5..93e767db 100644 --- a/config.sample.yaml +++ b/config.sample.yaml @@ -4,67 +4,70 @@ # WebDAV server configuration webdav: port: 8080 - user: "usenet" - password: "usenet" + user: 'usenet' + password: 'usenet' debug: false -# REST API configuration +# REST API configuration api: - prefix: "/api" # API endpoint prefix + prefix: '/api' # API endpoint prefix # Database configuration database: - path: "altmount.db" # Database for processing 
workflows + path: 'altmount.db' # Database for processing workflows # Metadata filesystem configuration metadata: - root_path: "./metadata" # Directory to store metadata files (required) + root_path: './metadata' # Directory to store metadata files (required) # Streaming and download configuration streaming: - max_range_size: 33554432 # 32MB - Maximum range size for a single request - streaming_chunk_size: 8388608 # 8MB - Chunk size for streaming when end=-1 - max_download_workers: 15 # Number of download workers + max_range_size: 33554432 # 32MB - Maximum range size for a single request + streaming_chunk_size: 8388608 # 8MB - Chunk size for streaming when end=-1 + max_download_workers: 15 # Number of download workers # RClone VFS configuration (optional) rclone: - password: "" # Encryption password (optional) - salt: "" # Encryption salt (optional) - vfs_enabled: false # Enable VFS notifications - vfs_url: "" # RClone VFS URL (e.g., "http://localhost:5572") - vfs_user: "" # VFS authentication username (optional) - vfs_pass: "" # VFS authentication password (optional) + password: '' # Encryption password (optional) + salt: '' # Encryption salt (optional) + vfs_enabled: false # Enable VFS notifications + vfs_url: '' # RClone VFS URL (e.g., "http://localhost:5572") + vfs_user: '' # VFS authentication username (optional) + vfs_pass: '' # VFS authentication password (optional) # Import processing configuration import: - max_processor_workers: 2 # Number of NZB processor workers - queue_processing_interval: 5 # Queue processing interval in seconds + max_processor_workers: 2 # Number of NZB processor workers + queue_processing_interval_seconds: 5 # Queue processing interval in seconds # Health monitoring configuration health: - enabled: true # Enable health monitoring service - auto_repair_enabled: false # Enable automatic repair of corrupted files via ARRs (default: false) + enabled: true # Enable health monitoring service + auto_repair_enabled: false # Enable automatic repair of corrupted files via ARRs (default: false) + check_interval_seconds: 300 # Health check interval in seconds (default: 300 = 5 minutes) + +# WebDAV mount path configuration +mount_path: '' # WebDAV mount path, Example: '/mnt/altmount' or '/mnt/unionfs'. Must be an absolute path starting with / # SABnzbd-compatible API configuration sabnzbd: - enabled: false # Enable SABnzbd-compatible API - mount_dir: "/mnt/altmount" # Directory where WebDAV is mounted - categories: # Download categories (optional) - - name: "movies" + enabled: false # Enable SABnzbd-compatible API + complete_dir: '/altmount/completed' # The complete directory where the files will be imported. 
+ categories: # Download categories (optional) + - name: 'movies' order: 1 priority: 0 - dir: "movies" - - name: "tv" + dir: 'movies' + - name: 'tv' order: 2 priority: 0 - dir: "tv" + dir: 'tv' # Radarr/Sonarr arrs configuration arrs: - enabled: false # Enable arrs service - mount_path: "" # WebDAV mount path to strip from file paths (required when enabled) - radarr_instances: [] # Radarr instances (configured via UI) - sonarr_instances: [] # Sonarr instances (configured via UI) + enabled: false # Enable arrs service + radarr_instances: [] # Radarr instances (configured via UI) + sonarr_instances: [] # Sonarr instances (configured via UI) # Example instance configuration (use the web UI instead): # radarr_instances: # - name: "radarr-main" @@ -79,39 +82,39 @@ arrs: # Logging configuration with rotation support log: - file: "./logs/altmount.log" # Log file path (empty = console only, defaults to same directory as config file) - level: "info" # Log level: debug, info, warn, error - max_size: 100 # Maximum size in MB before rotation - max_age: 30 # Maximum age in days to keep old files - max_backups: 10 # Maximum number of old files to keep - compress: true # Compress old log files + file: './logs/altmount.log' # Log file path (empty = console only, defaults to same directory as config file) + level: 'info' # Log level: debug, info, warn, error + max_size: 100 # Maximum size in MB before rotation + max_age: 30 # Maximum age in days to keep old files + max_backups: 10 # Maximum number of old files to keep + compress: true # Compress old log files # Global log level (legacy - use log.level instead) -log_level: "info" - +log_level: 'info' + # NNTP Providers Configuration # Configure multiple providers for redundancy and load balancing providers: # Primary provider with SSL - - name: "primary-ssl" - host: "ssl-news.provider.com" # Replace with your provider's SSL hostname + - name: 'primary-ssl' + host: 'ssl-news.provider.com' # Replace with your provider's SSL hostname port: 563 - username: "your_username" # Replace with your username - password: "your_password" # Replace with your password + username: 'your_username' # Replace with your username + password: 'your_password' # Replace with your password max_connections: 20 tls: true insecure_tls: false - + # Backup provider without SSL - - name: "backup-standard" - host: "news.provider.com" # Replace with your provider's standard hostname + - name: 'backup-standard' + host: 'news.provider.com' # Replace with your provider's standard hostname port: 119 - username: "your_username" # Replace with your username - password: "your_password" # Replace with your password + username: 'your_username' # Replace with your username + password: 'your_password' # Replace with your password max_connections: 10 tls: false insecure_tls: false - + # Secondary provider (optional) # - name: "secondary-provider" # host: "news.otherprovider.com" @@ -121,9 +124,8 @@ providers: # max_connections: 15 # tls: true # insecure_tls: false - # Configuration Notes: -# +# # 1. REST API: # - Provides HTTP endpoints for queue, health, and configuration management # - Available at http://server:port/api/ when enabled @@ -173,20 +175,26 @@ providers: # - Optional download categories for organization # - Enable for integration with existing download clients # -# 10. Scraper Service: +# 10. 
Mount Path: +# - Global WebDAV mount path configuration +# - Used by ARRs service to strip mount prefix from file paths +# - Must be an absolute path when ARRs is enabled +# - Example: '/mnt/altmount' or '/mnt/unionfs' +# +# 11. Scraper Service: # - Automatically index files from Radarr and Sonarr instances # - Configure multiple instances of each service type # - Customizable scrape intervals (default 24 hours) # - Manage instances through the web interface # - Indexes movie and TV episode file paths for fast searching # -# 11. Paths: +# 12. Paths: # - Use absolute paths for production deployments # - Relative paths are relative to the working directory # - Ensure the application has read/write access to all paths # - Metadata directory will be created automatically # -# 12. Logging Configuration: +# 13. Logging Configuration: # - Log file defaults to same directory as config file (e.g., "altmount.log") # - Set 'file' to custom path for different location (e.g., "./logs/altmount.log") # - Leave 'file' empty to log to console only @@ -194,8 +202,8 @@ providers: # - Old files are automatically compressed when 'compress: true' # - Supports backward compatibility with top-level 'log_level' # -# 13. Security: +# 14. Security: # - Store credentials securely # - Consider using environment variables for sensitive data # - Use strong passwords for WebDAV authentication -# - API keys for Radarr/Sonarr should be kept secure \ No newline at end of file +# - API keys for Radarr/Sonarr should be kept secure diff --git a/docker-compose.yml b/docker-compose.yml index 3ce8b908..2c586c82 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -6,7 +6,7 @@ services: dockerfile: docker/Dockerfile image: altmount:latest ports: - - "8081:8081" + - "8080:8080" volumes: - ./example/config:/config - ./example/metadata:/metadata @@ -29,4 +29,4 @@ services: interval: 30s timeout: 10s retries: 3 - start_period: 40s + start_period: 5s diff --git a/docker/Dockerfile b/docker/Dockerfile index ec8ed065..7d540802 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -112,7 +112,7 @@ EXPOSE 8080 VOLUME ["/config", "/metadata"] # Health check -HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \ +HEALTHCHECK --interval=5s --timeout=10s --start-period=5s --retries=3 \ CMD wget --no-verbose --tries=1 --spider http://localhost:8080/live || exit 1 # Labels diff --git a/docker/Dockerfile.ci b/docker/Dockerfile.ci index 0c83b647..a8d5676d 100644 --- a/docker/Dockerfile.ci +++ b/docker/Dockerfile.ci @@ -91,7 +91,7 @@ EXPOSE 8080 VOLUME ["/config", "/metadata"] # Health check -HEALTHCHECK --interval=30s --timeout=10s --start-period=30s --retries=3 \ +HEALTHCHECK --interval=5s --timeout=10s --start-period=5s --retries=3 \ CMD wget --no-verbose --tries=1 --spider http://localhost:8080/live || exit 1 # Labels diff --git a/docs/docs/1. intro.md b/docs/docs/1. intro.md index 240397ad..4cb7ee4c 100644 --- a/docs/docs/1. intro.md +++ b/docs/docs/1. intro.md @@ -139,15 +139,23 @@ After mounting, configure rclone VFS in AltMount: 2. Set **Rclone VFS Host** to `http://host.docker.internal:5573` 3. Save configuration -### 4. Enable SABnzbd Compatibility +### 4. Configure Mount Directory + +1. Go to AltMount web interface → **Settings** → **WebDAV** +2. Set **Mount Directory** to `/mnt/altmount` (or the path where you mounted AltMount) +3. Save configuration + +> **Important**: This configuration is required for ARR integrations to work properly. 
Without setting the mount_dir, the ARR applications won't be able to locate downloaded files. + +### 5. Enable SABnzbd Compatibility 1. Return to AltMount web interface 2. Go to **Configuration** → **Integration** 3. Enable **SABnzbd API compatibility** -4. Set **Complete directory** to `/mnt/altmount` +4. Set **Complete directory** to `/complete` (can be also set to an empty directory "/" and files will be imported into the mount root path) 5. Save configuration -### 5. Configure Your ARR Applications +### 6. Configure Your ARR Applications In your ARR applications (Radarr, Sonarr, etc.), configure the SABnzbd client: @@ -155,18 +163,16 @@ In your ARR applications (Radarr, Sonarr, etc.), configure the SABnzbd client: - **Port**: `8080` - **API Key**: (found in AltMount config/system) - **Category**: Set as needed -- **Downloaded Path**: `/mnt/altmount` **Important**: To prevent copy when importing, you need to enable "Use Hardlinks instead of Copy" in the sonarr/radarr settings/mediamanagement. -### 6. Add ARR Integration (Optional) +### 7. Add ARR Integration (Optional) For enhanced integration: 1. Go to AltMount **Configuration** → **ARR Integration** 2. Add your ARR application instances -3. Set the **WebDAV Mount Path** to `/mnt/altmount` -4. Save configuration +3. Save configuration That's it! Your media applications can now seamlessly download and stream content through AltMount. diff --git a/docs/docs/3. Configuration/basic.md b/docs/docs/3. Configuration/basic.md index 126a4a68..c6db874a 100644 --- a/docs/docs/3. Configuration/basic.md +++ b/docs/docs/3. Configuration/basic.md @@ -199,7 +199,7 @@ AltMount can act as a drop-in replacement for SABnzbd: ```yaml sabnzbd: enabled: false # Enable SABnzbd-compatible API - mount_dir: "/mnt/altmount" # Directory where WebDAV is mounted + complete_dir: "/mnt/altmount/completed" # The complete directory where the files will be imported from the WebDAV mount POINT OF VIEW ``` **Integration Workflow:** @@ -207,9 +207,9 @@ sabnzbd: 1. **ARR Configuration**: Add AltMount as SABnzbd downloader in your ARRs 2. **NZB Reception**: AltMount receives NZB from ARR, imports it 3. **Completion Notification**: ARR picks up the completed download -4. **Mount Directory**: Must be configured from the mounted directory point of view (where ARRs see the files) +4. **Complete Directory**: Must be configured from the mounted directory point of view (where ARRs see the files) -**Critical Configuration**: The `mount_dir` must be set to the path where your ARRs see the WebDAV-mounted files, not the local AltMount path. +**Critical Configuration**: The `complete_dir` must be set to the path where your ARRs see the WebDAV-mounted files, not the local AltMount path. See [SABnzbd Integration](integration.md) for complete setup instructions. diff --git a/docs/docs/3. Configuration/integration.md b/docs/docs/3. Configuration/integration.md index a593d0b0..dc132c8b 100644 --- a/docs/docs/3. Configuration/integration.md +++ b/docs/docs/3. Configuration/integration.md @@ -37,7 +37,7 @@ _SABnzbd-compatible API configuration in the AltMount web interface_ **Configuration Steps:** 1. **Enable SABnzbd API**: Check the enabled checkbox to activate the API -2. **Mount Directory**: Set where WebDAV will be mounted (use `/mnt/unionfs` if using unionfs) +2. **Complete Directory**: Set where WebDAV will be mounted (use `/mnt/unionfs/altmount/completed` if using unionfs) 3. 
**Categories**: Configure download categories for different media types: - **movies**: For movie downloads (order: 1, priority: 0) - **tv**: For TV show downloads (order: 2, priority: 0) diff --git a/docs/docs/4. API/endpoints.md b/docs/docs/4. API/endpoints.md index a9934990..489c356b 100644 --- a/docs/docs/4. API/endpoints.md +++ b/docs/docs/4. API/endpoints.md @@ -25,16 +25,21 @@ Manually add a file by filesystem path to the import queue. This is useful for c **Query Parameters**: - `apikey` (required): Your AltMount API key -- `relative_path` (optional): Path that will be stripped from the file destination **Request Body** (JSON): ```json { - "file_path": "/path/to/your/file.nzb" + "file_path": "/path/to/your/file.nzb", + "relative_path": "/path/to/strip" } ``` +**Request Body Fields**: + +- `file_path` (required): Full path to the file to import +- `relative_path` (optional): Path that will be stripped from the file destination + #### Response Format **Success Response** (200 OK): @@ -66,9 +71,9 @@ curl -X POST "http://localhost:8080/api/import/file?apikey=YOUR_API_KEY" \ -d '{"file_path": "/downloads/movie.nzb"}' # Import with relative path -curl -X POST "http://localhost:8080/api/import/file?apikey=YOUR_API_KEY&relative_path=/downloads" \ +curl -X POST "http://localhost:8080/api/import/file?apikey=YOUR_API_KEY" \ -H "Content-Type: application/json" \ - -d '{"file_path": "/downloads/subfolder/tvshow.nzb"}' + -d '{"file_path": "/downloads/subfolder/tvshow.nzb", "relative_path": "/downloads"}' ``` #### File Requirements diff --git a/docs/docs/5. Troubleshooting/performance.md b/docs/docs/5. Troubleshooting/performance.md index a90c8962..e63496c3 100644 --- a/docs/docs/5. Troubleshooting/performance.md +++ b/docs/docs/5. Troubleshooting/performance.md @@ -166,7 +166,7 @@ streaming: import: max_processor_workers: 4 # Multiple NZB processors - queue_processing_interval: 2 # Fast queue processing + queue_processing_interval_seconds: 2 # Fast queue processing ``` #### Balanced Configuration @@ -179,7 +179,7 @@ streaming: import: max_processor_workers: 2 # Standard processing - queue_processing_interval: 5 # Standard interval + queue_processing_interval_seconds: 5 # Standard interval ``` #### Resource-Constrained Configuration @@ -192,7 +192,7 @@ streaming: import: max_processor_workers: 1 # Single processor - queue_processing_interval: 10 # Slower processing + queue_processing_interval_seconds: 10 # Slower processing ``` ### Provider Optimization @@ -340,7 +340,7 @@ streaming: import: max_processor_workers: 8 # Fast NZB processing - queue_processing_interval: 1 # Very fast queue processing + queue_processing_interval_seconds: 1 # Very fast queue processing # Optimize for throughput over latency log: diff --git a/frontend/src/api/client.ts b/frontend/src/api/client.ts index 6cbed411..67e09e35 100644 --- a/frontend/src/api/client.ts +++ b/frontend/src/api/client.ts @@ -31,11 +31,13 @@ import type { export class APIError extends Error { public status: number; + public details: string; - constructor(status: number, message: string) { + constructor(status: number, message: string, details: string) { super(message); this.status = status; this.name = "APIError"; + this.details = details; } } @@ -50,6 +52,7 @@ export class APIClient { const url = `${this.baseURL}${endpoint}`; const config: RequestInit = { + credentials: "include", // Include cookies for Safari compatibility headers: { "Content-Type": "application/json", ...options.headers, @@ -61,13 +64,19 @@ export class APIClient { const response 
= await fetch(url, config); if (!response.ok) { - throw new APIError(response.status, `HTTP ${response.status}: ${response.statusText}`); + const errorData = await response.json(); + throw new APIError( + response.status, + errorData.message || `HTTP ${response.status}: ${response.statusText}`, + errorData.details || "", + ); } const data: APIResponse = await response.json(); if (!data.success) { - throw new APIError(response.status, data.error || "API request failed"); + // Handle error in the success=false format + throw new APIError(response.status, data.error || "API request failed", ""); } return data.data as T; @@ -75,7 +84,7 @@ export class APIClient { if (error instanceof APIError) { throw error; } - throw new APIError(0, error instanceof Error ? error.message : "Network error"); + throw new APIError(0, error instanceof Error ? error.message : "Network error", ""); } } @@ -86,6 +95,7 @@ export class APIClient { const url = `${this.baseURL}${endpoint}`; const config: RequestInit = { + credentials: "include", // Include cookies for Safari compatibility headers: { "Content-Type": "application/json", ...options.headers, @@ -97,13 +107,29 @@ export class APIClient { const response = await fetch(url, config); if (!response.ok) { - throw new APIError(response.status, `HTTP ${response.status}: ${response.statusText}`); + // Try to parse error response + try { + const errorData = await response.json(); + throw new APIError( + response.status, + errorData.message || `HTTP ${response.status}: ${response.statusText}`, + errorData.details || "", + ); + } catch { + // If parsing fails, use generic error + throw new APIError( + response.status, + `HTTP ${response.status}: ${response.statusText}`, + "", + ); + } } const data: APIResponse = await response.json(); if (!data.success) { - throw new APIError(response.status, data.error || "API request failed"); + // Handle error in the success=false format + throw new APIError(response.status, data.error || "API request failed", ""); } return data; @@ -111,7 +137,7 @@ export class APIClient { if (error instanceof APIError) { throw error; } - throw new APIError(0, error instanceof Error ? error.message : "Network error"); + throw new APIError(0, error instanceof Error ? error.message : "Network error", ""); } } @@ -152,6 +178,16 @@ export class APIClient { }); } + async restartBulkQueueItems(ids: number[]) { + return this.request<{ restarted_count: number; message: string }>("/queue/bulk/restart", { + method: "POST", + headers: { + "Content-Type": "application/json", + }, + body: JSON.stringify({ ids }), + }); + } + async retryQueueItem(id: number) { return this.request(`/queue/${id}/retry`, { method: "POST", @@ -172,6 +208,16 @@ export class APIClient { }); } + async clearFailedQueue(olderThan?: string) { + const searchParams = new URLSearchParams(); + if (olderThan) searchParams.set("older_than", olderThan); + + const query = searchParams.toString(); + return this.request(`/queue/failed${query ? 
`?${query}` : ""}`, { + method: "DELETE", + }); + } + // Health endpoints async getHealth(params?: { limit?: number; @@ -195,8 +241,8 @@ export class APIClient { return this.request(`/health/${encodeURIComponent(id)}`); } - async deleteHealthItem(id: string) { - return this.request(`/health/${encodeURIComponent(id)}`, { + async deleteHealthItem(id: number) { + return this.request(`/health/${id}`, { method: "DELETE", }); } @@ -220,8 +266,8 @@ export class APIClient { }); } - async repairHealthItem(id: string, resetRepairRetryCount?: boolean) { - return this.request(`/health/${encodeURIComponent(id)}/repair`, { + async repairHealthItem(id: number, resetRepairRetryCount?: boolean) { + return this.request(`/health/${id}/repair`, { method: "POST", body: JSON.stringify({ reset_repair_retry_count: resetRepairRetryCount }), }); @@ -262,28 +308,30 @@ export class APIClient { return this.request("/system/pool/metrics"); } - async directHealthCheck(filePath: string) { + async directHealthCheck(id: number) { return this.request<{ message: string; + id: number; file_path: string; old_status: string; new_status: string; checked_at: string; health_data: FileHealth; - }>(`/health/${encodeURIComponent(filePath)}/check-now`, { + }>(`/health/${id}/check-now`, { method: "POST", }); } - async cancelHealthCheck(filePath: string) { + async cancelHealthCheck(id: number) { return this.request<{ message: string; + id: number; file_path: string; old_status: string; new_status: string; cancelled_at: string; health_data: FileHealth; - }>(`/health/${encodeURIComponent(filePath)}/cancel`, { + }>(`/health/${id}/cancel`, { method: "POST", }); } @@ -460,19 +508,44 @@ export class APIClient { const response = await fetch(url, { method: "POST", body: formData, + credentials: "include", // Include cookies for Safari compatibility }); if (!response.ok) { - throw new APIError(response.status, `Upload failed: ${response.statusText}`); + throw new APIError(response.status, `Upload failed: ${response.statusText}`, ""); } const data = await response.json(); if (!data.status) { - throw new APIError(response.status, data.error || "Upload failed"); + const err = data as APIError; + throw new APIError(response.status, err.message || "Upload failed", err.details || ""); } return data; } + + // Native upload endpoint using JWT authentication + async uploadToQueue( + file: File, + category?: string, + priority?: number, + ): Promise> { + const formData = new FormData(); + formData.append("file", file); + if (category) { + formData.append("category", category); + } + if (priority !== undefined) { + formData.append("priority", priority.toString()); + } + + return this.request>("/queue/upload", { + method: "POST", + body: formData, + // Don't set Content-Type header - let browser set it with boundary for multipart/form-data + headers: {}, + }); + } } // Export a default instance diff --git a/frontend/src/components/config/ArrsConfigSection.tsx b/frontend/src/components/config/ArrsConfigSection.tsx index c098c3cb..3bafbc0c 100644 --- a/frontend/src/components/config/ArrsConfigSection.tsx +++ b/frontend/src/components/config/ArrsConfigSection.tsx @@ -50,12 +50,7 @@ export function ArrsConfigSection({ const errors: string[] = []; if (data.enabled) { - // Validate mount path - if (!data.mount_path.trim()) { - errors.push("Mount path is required when arrs is enabled"); - } else if (!data.mount_path.startsWith("/")) { - errors.push("Mount path must be an absolute path (start with /)"); - } + // Note: mount_path validation is now handled at the 
root config level // Validate instances const allInstanceNames = [ @@ -223,33 +218,6 @@ export function ArrsConfigSection({ - {/* Default Settings */} - {formData.enabled && ( -
-
-					<div className="card bg-base-200">
-						<div className="card-body">
-							<h3 className="card-title text-sm">Default Settings</h3>
-							<div className="form-control">
-								<label className="label">
-									<span className="label-text">WebDAV Mount Path</span>
-								</label>
-								<input
-									type="text"
-									className="input input-bordered"
-									value={formData.mount_path}
-									onChange={(e) => handleFormChange("mount_path", e.target.value)}
-									placeholder="/mnt/altmount"
-									disabled={isReadOnly}
-								/>
-								<label className="label">
-									<span className="label-text-alt">
-										Absolute path where WebDAV is mounted. In case you have a setup an union in the
-										arrs, add the union instead. Ex: "/mnt/unionfs", "/mnt/altmount"
-									</span>
-								</label>
-							</div>
-						</div>
-					</div>
-				)}
-
 			{/* Radarr Instances */}
 			{formData.enabled && (
diff --git a/frontend/src/components/config/ArrsInstanceCard.tsx b/frontend/src/components/config/ArrsInstanceCard.tsx index 13d6042d..63cf8dbe 100644 --- a/frontend/src/components/config/ArrsInstanceCard.tsx +++ b/frontend/src/components/config/ArrsInstanceCard.tsx @@ -175,11 +175,7 @@ export function ArrsInstanceCard({ disabled={isReadOnly || isTestingConnection || !instance.url || !instance.api_key} aria-label="Test connection" > - {isTestingConnection ? ( -
-						<span className="loading loading-spinner loading-xs" />
-					) : (
-						"Test"
-					)}
+					{isTestingConnection ? <span className="loading loading-spinner loading-xs" /> : "Test"}
{testResult.type && ( diff --git a/frontend/src/components/config/SABnzbdConfigSection.tsx b/frontend/src/components/config/SABnzbdConfigSection.tsx index 9ec68c22..1f5c4a9e 100644 --- a/frontend/src/components/config/SABnzbdConfigSection.tsx +++ b/frontend/src/components/config/SABnzbdConfigSection.tsx @@ -46,11 +46,11 @@ export function SABnzbdConfigSection({ const errors: string[] = []; if (data.enabled) { - // Validate mount_dir is required and absolute - if (!data.mount_dir.trim()) { - errors.push("Mount directory is required when SABnzbd API is enabled"); - } else if (!data.mount_dir.startsWith("/")) { - errors.push("Mount directory must be an absolute path (starting with /)"); + // Validate complete_dir is required and absolute + if (!data.complete_dir?.trim()) { + errors.push("Complete directory is required when SABnzbd API is enabled"); + } else if (!data.complete_dir.startsWith("/")) { + errors.push("Complete directory must be an absolute path (starting with /)"); } // Validate category names are unique @@ -84,8 +84,8 @@ export function SABnzbdConfigSection({ updateFormData({ enabled }); }; - const handleMountDirChange = (mount_dir: string) => { - updateFormData({ mount_dir }); + const handleCompleteDirChange = (complete_dir: string) => { + updateFormData({ complete_dir }); }; const handleCategoryUpdate = (index: number, updates: Partial) => { @@ -176,10 +176,10 @@ export function SABnzbdConfigSection({ handleMountDirChange(e.target.value)} + onChange={(e) => handleCompleteDirChange(e.target.value)} />

Absolute path to the directory where the complete imports will be placed. FROM THE diff --git a/frontend/src/components/config/SystemConfigSection.tsx b/frontend/src/components/config/SystemConfigSection.tsx index 781c924e..7ec955c2 100644 --- a/frontend/src/components/config/SystemConfigSection.tsx +++ b/frontend/src/components/config/SystemConfigSection.tsx @@ -3,11 +3,11 @@ import { useEffect, useState } from "react"; import { useConfirm } from "../../contexts/ModalContext"; import { useToast } from "../../contexts/ToastContext"; import { useAuth, useRegenerateAPIKey } from "../../hooks/useAuth"; -import type { ConfigResponse, SystemFormData } from "../../types/config"; +import type { ConfigResponse, LogFormData } from "../../types/config"; interface SystemConfigSectionProps { config: ConfigResponse; - onUpdate?: (section: string, data: SystemFormData) => Promise; + onUpdate?: (section: string, data: LogFormData) => Promise; isReadOnly?: boolean; isUpdating?: boolean; } @@ -18,8 +18,13 @@ export function SystemConfigSection({ isReadOnly = false, isUpdating = false, }: SystemConfigSectionProps) { - const [formData, setFormData] = useState({ - log_level: config.log_level, + const [formData, setFormData] = useState({ + file: config.log.file, + level: config.log.level, + max_size: config.log.max_size, + max_age: config.log.max_age, + max_backups: config.log.max_backups, + compress: config.log.compress, }); const [hasChanges, setHasChanges] = useState(false); @@ -32,24 +37,41 @@ export function SystemConfigSection({ // Sync form data when config changes from external sources (reload) useEffect(() => { const newFormData = { - log_level: config.log_level, + file: config.log.file, + level: config.log.level, + max_size: config.log.max_size, + max_age: config.log.max_age, + max_backups: config.log.max_backups, + compress: config.log.compress, }; setFormData(newFormData); setHasChanges(false); - }, [config.log_level]); + }, [ + config.log.file, + config.log.level, + config.log.max_size, + config.log.max_age, + config.log.max_backups, + config.log.compress, + ]); - const handleInputChange = (field: keyof SystemFormData, value: string) => { + const handleInputChange = (field: keyof LogFormData, value: string | number | boolean) => { const newData = { ...formData, [field]: value }; setFormData(newData); const configData = { - log_level: config.log_level, + file: config.log.file, + level: config.log.level, + max_size: config.log.max_size, + max_age: config.log.max_age, + max_backups: config.log.max_backups, + compress: config.log.compress, }; setHasChanges(JSON.stringify(newData) !== JSON.stringify(configData)); }; const handleSave = async () => { if (onUpdate && hasChanges) { - await onUpdate("system", formData); + await onUpdate("log", formData); setHasChanges(false); } }; @@ -110,9 +132,9 @@ export function SystemConfigSection({ Log Level handleInputChange("mount_path", e.target.value)} + placeholder="/mnt/altmount" + /> +

+						<label className="label">
+							<span className="label-text-alt">
+								Absolute path where WebDAV is mounted. Required when ARRs is enabled.
+								This path will be stripped from file paths when communicating with Radarr/Sonarr.
+							</span>
+						</label>
+					</div>
Port diff --git a/frontend/src/components/config/WorkersConfigSection.tsx b/frontend/src/components/config/WorkersConfigSection.tsx index f96b5e5d..fbc25e40 100644 --- a/frontend/src/components/config/WorkersConfigSection.tsx +++ b/frontend/src/components/config/WorkersConfigSection.tsx @@ -58,17 +58,17 @@ export function ImportConfigSection({
- Queue Processing Interval + Queue Processing Interval (Seconds) handleInputChange( - "queue_processing_interval", + "queue_processing_interval_seconds", Number.parseInt(e.target.value, 10) || 5, ) } diff --git a/frontend/src/components/queue/DragDropUpload.tsx b/frontend/src/components/queue/DragDropUpload.tsx index 512a596a..022cac51 100644 --- a/frontend/src/components/queue/DragDropUpload.tsx +++ b/frontend/src/components/queue/DragDropUpload.tsx @@ -1,7 +1,6 @@ import { AlertCircle, CheckCircle2, FileIcon, Upload, X } from "lucide-react"; import { useCallback, useState } from "react"; -import { useAuth } from "../../contexts/AuthContext"; -import { useUploadNzb } from "../../hooks/useApi"; +import { useUploadToQueue } from "../../hooks/useApi"; import { ErrorAlert } from "../ui/ErrorAlert"; interface UploadedFile { @@ -13,10 +12,9 @@ interface UploadedFile { } export function DragDropUpload() { - const { user } = useAuth(); const [isDragOver, setIsDragOver] = useState(false); const [uploadedFiles, setUploadedFiles] = useState([]); - const uploadMutation = useUploadNzb(); + const uploadMutation = useUploadToQueue(); const validateFile = useCallback((file: File): string | null => { // Check file extension @@ -34,11 +32,6 @@ export function DragDropUpload() { const handleFiles = useCallback( (files: File[]) => { - if (!user?.api_key) { - console.error("No API key available"); - return; - } - const newFiles: UploadedFile[] = files.map((file) => ({ file, id: `${file.name}-${Date.now()}-${Math.random()}`, @@ -72,7 +65,6 @@ export function DragDropUpload() { try { const response = await uploadMutation.mutateAsync({ file: uploadFile.file, - apiKey: user.api_key || "", }); // Update status to success @@ -82,7 +74,7 @@ export function DragDropUpload() { ? { ...f, status: "success" as const, - queueId: response.nzo_ids[0], + queueId: response.data?.id.toString(), } : f, ), @@ -103,7 +95,7 @@ export function DragDropUpload() { } }); }, - [user?.api_key, uploadMutation, validateFile], + [uploadMutation, validateFile], ); const handleDragOver = useCallback((e: React.DragEvent) => { @@ -178,24 +170,6 @@ export function DragDropUpload() { } }; - if (!user?.api_key) { - return ( -
-			<div className="alert alert-warning">
-				<div>
-					<h3 className="font-bold">API Key Required</h3>
-					<div className="text-sm">
-						You need an API key to upload NZB files. Generate one in the System settings.
-					</div>
-				</div>
-			</div>
-		);
-	}
-
 	return (
diff --git a/frontend/src/hooks/useApi.ts b/frontend/src/hooks/useApi.ts index aad1f6e7..128cb332 100644 --- a/frontend/src/hooks/useApi.ts +++ b/frontend/src/hooks/useApi.ts @@ -55,6 +55,18 @@ export const useDeleteBulkQueueItems = () => { }); }; +export const useRestartBulkQueueItems = () => { + const queryClient = useQueryClient(); + + return useMutation({ + mutationFn: (ids: number[]) => apiClient.restartBulkQueueItems(ids), + onSuccess: () => { + queryClient.invalidateQueries({ queryKey: ["queue"] }); + queryClient.invalidateQueries({ queryKey: ["queue", "stats"] }); + }, + }); +}; + export const useRetryQueueItem = () => { const queryClient = useQueryClient(); @@ -77,6 +89,17 @@ export const useClearCompletedQueue = () => { }); }; +export const useClearFailedQueue = () => { + const queryClient = useQueryClient(); + + return useMutation({ + mutationFn: (olderThan?: string) => apiClient.clearFailedQueue(olderThan), + onSuccess: () => { + queryClient.invalidateQueries({ queryKey: ["queue"] }); + }, + }); +}; + // Health hooks export const useHealth = (params?: { limit?: number; @@ -129,7 +152,7 @@ export const useDeleteHealthItem = () => { const queryClient = useQueryClient(); return useMutation({ - mutationFn: (id: string) => apiClient.deleteHealthItem(id), + mutationFn: (id: number) => apiClient.deleteHealthItem(id), onSuccess: () => { queryClient.invalidateQueries({ queryKey: ["health"] }); }, @@ -163,7 +186,7 @@ export const useRepairHealthItem = () => { const queryClient = useQueryClient(); return useMutation({ - mutationFn: ({ id, resetRepairRetryCount }: { id: string; resetRepairRetryCount?: boolean }) => + mutationFn: ({ id, resetRepairRetryCount }: { id: number; resetRepairRetryCount?: boolean }) => apiClient.repairHealthItem(id, resetRepairRetryCount), onSuccess: () => { queryClient.invalidateQueries({ queryKey: ["health"] }); @@ -215,7 +238,7 @@ export const useDirectHealthCheck = () => { const queryClient = useQueryClient(); return useMutation({ - mutationFn: (filePath: string) => apiClient.directHealthCheck(filePath), + mutationFn: (id: number) => apiClient.directHealthCheck(id), onSuccess: () => { // Immediately refresh health data to show "checking" status queryClient.invalidateQueries({ queryKey: ["health"] }); @@ -228,7 +251,7 @@ export const useCancelHealthCheck = () => { const queryClient = useQueryClient(); return useMutation({ - mutationFn: (filePath: string) => apiClient.cancelHealthCheck(filePath), + mutationFn: (id: number) => apiClient.cancelHealthCheck(id), onSuccess: () => { // Immediately refresh health data to show cancelled status queryClient.invalidateQueries({ queryKey: ["health"] }); @@ -272,7 +295,7 @@ export const useCancelScan = () => { }); }; -// NZB file upload hook +// NZB file upload hook (SABnzbd API) export const useUploadNzb = () => { const queryClient = useQueryClient(); @@ -286,3 +309,25 @@ export const useUploadNzb = () => { }, }); }; + +// Native upload hook (using JWT authentication) +export const useUploadToQueue = () => { + const queryClient = useQueryClient(); + + return useMutation({ + mutationFn: ({ + file, + category, + priority, + }: { + file: File; + category?: string; + priority?: number; + }) => apiClient.uploadToQueue(file, category, priority), + onSuccess: () => { + // Invalidate queue data to show newly uploaded files + queryClient.invalidateQueries({ queryKey: ["queue"] }); + queryClient.invalidateQueries({ queryKey: ["queue", "stats"] }); + }, + }); +}; diff --git a/frontend/src/hooks/useConfig.ts 
b/frontend/src/hooks/useConfig.ts index c5cd1f56..95e9298b 100644 --- a/frontend/src/hooks/useConfig.ts +++ b/frontend/src/hooks/useConfig.ts @@ -1,5 +1,6 @@ import { useMutation, useQuery, useQueryClient } from "@tanstack/react-query"; -import { apiClient } from "../api/client"; +import { type APIError, apiClient } from "../api/client"; +import { useToast } from "../contexts/ToastContext"; import type { ConfigSection, ConfigUpdateRequest, ConfigValidateRequest } from "../types/config"; // Query keys for React Query @@ -37,6 +38,7 @@ export function useUpdateConfig() { // Hook to update specific configuration section export function useUpdateConfigSection() { const queryClient = useQueryClient(); + const { showToast } = useToast(); return useMutation({ mutationFn: ({ section, config }: { section: ConfigSection; config: ConfigUpdateRequest }) => @@ -46,7 +48,14 @@ export function useUpdateConfigSection() { queryClient.setQueryData(configKeys.current(), data); }, onError: (error) => { + const err = error as APIError; console.error("Failed to update configuration section:", error); + + showToast({ + type: "error", + title: "Update Failed", + message: err.details, + }); }, }); } diff --git a/frontend/src/pages/ConfigurationPage.tsx b/frontend/src/pages/ConfigurationPage.tsx index 383fb0a9..954c61f7 100644 --- a/frontend/src/pages/ConfigurationPage.tsx +++ b/frontend/src/pages/ConfigurationPage.tsx @@ -40,11 +40,11 @@ import type { ConfigSection, HealthConfig, ImportConfig, + LogFormData, MetadataConfig, RCloneVFSFormData, SABnzbdConfig, StreamingConfig, - SystemFormData, WebDAVConfig, } from "../types/config"; import { CONFIG_SECTIONS } from "../types/config"; @@ -161,9 +161,10 @@ export function ConfigurationPage() { | ImportConfig | MetadataConfig | RCloneVFSFormData - | SystemFormData + | LogFormData | SABnzbdConfig - | ArrsConfig, + | ArrsConfig + | { mount_path: string }, ) => { try { if (section === "webdav" && config) { @@ -216,13 +217,12 @@ export function ConfigurationPage() { section: "rclone", config: { rclone: data as RCloneVFSFormData }, }); - } else if (section === "system") { - const systemData = data as SystemFormData; + } else if (section === "mount_path") { + // For mount_path, we need to update the system section with mount_path + const mountPathData = data as { mount_path: string }; await updateConfigSection.mutateAsync({ section: "system", - config: { - log_level: systemData.log_level, - }, + config: mountPathData, }); } else if (section === "sabnzbd") { await updateConfigSection.mutateAsync({ @@ -518,10 +518,10 @@ export function ConfigurationPage() { "arrs", "health", ].includes(activeSection) && ( - - )} + + )}
diff --git a/frontend/src/pages/HealthPage.tsx b/frontend/src/pages/HealthPage.tsx index 0cda8fb3..c1946042 100644 --- a/frontend/src/pages/HealthPage.tsx +++ b/frontend/src/pages/HealthPage.tsx @@ -74,10 +74,10 @@ export function HealthPage() { const { confirmDelete, confirmAction } = useConfirm(); const { showToast } = useToast(); - const handleDelete = async (filePath: string) => { + const handleDelete = async (id: number) => { const confirmed = await confirmDelete("health record"); if (confirmed) { - await deleteItem.mutateAsync(filePath); + await deleteItem.mutateAsync(id); } }; @@ -121,15 +121,15 @@ export function HealthPage() { } }; - const handleManualCheck = async (filePath: string) => { + const handleManualCheck = async (id: number) => { try { - await directHealthCheck.mutateAsync(filePath); + await directHealthCheck.mutateAsync(id); } catch (err) { console.error("Failed to perform direct health check:", err); } }; - const handleCancelCheck = async (filePath: string) => { + const handleCancelCheck = async (id: number) => { const confirmed = await confirmAction( "Cancel Health Check", "Are you sure you want to cancel this health check?", @@ -141,14 +141,14 @@ export function HealthPage() { ); if (confirmed) { try { - await cancelHealthCheck.mutateAsync(filePath); + await cancelHealthCheck.mutateAsync(id); } catch (err) { console.error("Failed to cancel health check:", err); } } }; - const handleRepair = async (filePath: string) => { + const handleRepair = async (id: number) => { const confirmed = await confirmAction( "Trigger Repair", "This will attempt to ask the ARR to redownload the corrupted file from your media library. THIS FILE WILL BE DELETED IF THE REPAIR IS SUCCESSFUL. Are you sure you want to proceed?", @@ -161,7 +161,7 @@ export function HealthPage() { if (confirmed) { try { await repairHealthItem.mutateAsync({ - id: filePath, + id, resetRepairRetryCount: false, }); showToast({ @@ -172,48 +172,29 @@ export function HealthPage() { } catch (err: unknown) { const error = err as { message?: string; - response?: { - data?: { - error?: { - message?: string; - details?: string; - }; - }; - }; + code?: string; }; console.error("Failed to trigger repair:", err); - // Get error message from response or direct error - const apiErrorMessage = error.response?.data?.error?.message; - const apiErrorDetails = error.response?.data?.error?.details; - const errorMessage = apiErrorMessage || error.message || "Unknown error"; - - // Handle specific error cases - if (errorMessage.includes("Repair not available")) { - showToast({ - title: "Repair not available", - message: apiErrorDetails || "File not found in media library", - type: "error", - }); - } else if (errorMessage.includes("Media library not configured")) { - showToast({ - title: "Configuration Required", - message: "Media library must be configured to use repair functionality", - type: "error", - }); - } else if (errorMessage.includes("Media library error")) { - showToast({ - title: "Media Library Error", - message: "Unable to access media library to verify file availability", - type: "error", - }); - } else { + // Check for 404 Not Found (file not in ARR) + if (error.code === "NOT_FOUND") { showToast({ - title: "Failed to trigger repair", - message: errorMessage, - type: "error", + title: "File Not Found in ARR", + message: + "This file is not managed by any configured Radarr or Sonarr instance. 
Please check your ARR configuration and ensure the file is in your media library.", + type: "warning", }); + return; } + + // Get error message from response or direct error + const errorMessage = error.message || "Unknown error"; + + showToast({ + title: "Failed to trigger repair", + message: errorMessage, + type: "error", + }); } } }; @@ -696,7 +677,7 @@ export function HealthPage() {
 					<span>•</span>
 				)}
+				{stats && stats.total_failed > 0 && (
+					<button type="button" className="btn btn-ghost btn-xs" onClick={handleClearFailed}>
+						Clear Failed
+					</button>
+				)}
 				<span>•</span>
@@ -384,6 +438,15 @@ export function QueuePage() {
+