diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index e74e59631..f1687eabf 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -2,11 +2,13 @@
Provide a general summary of your changes in the Title above.
The PR title must start with `feat(): `, `docs(): `, `fix(): `, `style(): `, or `refactor(): `, `chore(): `. For example: `feat(component): add new feature`.
If it spans multiple components, use the main component as the prefix and enumerate in the title, describe in the body.
+ For breaking changes, add `!` after the scope (before the colon), e.g., `feat(component)!: breaking change`.
-->
## Description / 描述
diff --git a/.github/workflows/issue_pr_comment.yml b/.github/workflows/issue_pr_comment.yml
index c618485f0..f15acd424 100644
--- a/.github/workflows/issue_pr_comment.yml
+++ b/.github/workflows/issue_pr_comment.yml
@@ -47,12 +47,14 @@ jobs:
with:
script: |
const title = context.payload.pull_request.title || "";
- const ok = /^(feat|docs|fix|style|refactor|chore)\(.+?\): /i.test(title);
+ const ok = /^(feat|docs|fix|style|refactor|chore)\(.+?\)!?: /i.test(title);
if (!ok) {
let comment = "⚠️ PR 标题需以 `feat(): `, `docs(): `, `fix(): `, `style(): `, `refactor(): `, `chore(): ` 其中之一开头,例如:`feat(component): 新增功能`。\n";
comment += "⚠️ The PR title must start with `feat(): `, `docs(): `, `fix(): `, `style(): `, or `refactor(): `, `chore(): `. For example: `feat(component): add new feature`.\n\n";
comment += "如果跨多个组件,请使用主要组件作为前缀,并在标题中枚举、描述中说明。\n";
comment += "If it spans multiple components, use the main component as the prefix and enumerate in the title, describe in the body.\n\n";
+ comment += "如果是破坏性变更,请在类型后添加 `!`,例如 `feat(component)!: 破坏性变更`。\n";
+ comment += "For breaking changes, add `!` after the type, e.g., `feat(component)!: breaking change`.\n\n";
await github.rest.issues.createComment({
...context.repo,
issue_number: context.issue.number,
diff --git a/.github/workflows/test_docker.yml b/.github/workflows/test_docker.yml
index aa6fe8966..d3f4a8fff 100644
--- a/.github/workflows/test_docker.yml
+++ b/.github/workflows/test_docker.yml
@@ -1,5 +1,4 @@
name: Beta Release (Docker)
-
on:
workflow_dispatch:
push:
@@ -7,37 +6,69 @@ on:
- main
pull_request:
branches:
- - main
+ - copy
+
concurrency:
group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
cancel-in-progress: true
env:
- DOCKERHUB_ORG_NAME: ${{ vars.DOCKERHUB_ORG_NAME || 'openlistteam' }}
- GHCR_ORG_NAME: ${{ vars.GHCR_ORG_NAME || 'openlistteam' }}
- IMAGE_NAME: openlist-git
- IMAGE_NAME_DOCKERHUB: openlist
+ GHCR_ORG_NAME: ${{ vars.GHCR_ORG_NAME || 'ironboxplus' }} # 👈 最好改成你的用户名,防止推错地方
+ IMAGE_NAME: openlist
REGISTRY: ghcr.io
ARTIFACT_NAME: 'binaries_docker_release'
- RELEASE_PLATFORMS: 'linux/amd64,linux/arm64,linux/arm/v7,linux/386,linux/arm/v6,linux/ppc64le,linux/riscv64,linux/loong64' ### Temporarily disable Docker builds for linux/s390x architectures for unknown reasons.
- IMAGE_PUSH: ${{ github.event_name == 'push' }}
+ # 👇 关键修改:只保留 linux/amd64,删掉后面一长串
+ RELEASE_PLATFORMS: 'linux/amd64'
+ # 👇 关键修改:强制允许推送,不用管是不是 push 事件
+ IMAGE_PUSH: 'true'
+ # 👇 使用默认的前端仓库 (OpenListTeam/OpenList-Frontend)
+ # FRONTEND_REPO: 'Ironboxplus/OpenList-Frontend'
IMAGE_TAGS_BETA: |
type=ref,event=pr
- type=raw,value=beta,enable={{is_default_branch}}
+ type=raw,value=beta-retry
jobs:
build_binary:
- name: Build Binaries for Docker Release (Beta)
+ name: Build Binaries (x64 Only)
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- - uses: actions/setup-go@v5
+ - name: Setup Go
+ uses: actions/setup-go@v5
with:
go-version: '1.25.0'
+ cache: true
+ cache-dependency-path: go.sum
+
+ # 获取前端仓库的最新commit SHA
+ - name: Get Frontend Commit SHA
+ id: frontend-sha
+ run: |
+ FRONTEND_REPO="${{ env.FRONTEND_REPO }}"
+ # 如果未设置FRONTEND_REPO,使用默认值
+ if [ -z "$FRONTEND_REPO" ]; then
+ FRONTEND_REPO="OpenListTeam/OpenList-Frontend"
+ fi
+ FRONTEND_SHA=$(curl -s https://api.github.com/repos/$FRONTEND_REPO/commits/main | jq -r '.sha')
+ echo "sha=$FRONTEND_SHA" >> $GITHUB_OUTPUT
+ echo "repo=$FRONTEND_REPO" >> $GITHUB_OUTPUT
+ echo "Frontend repo: $FRONTEND_REPO"
+ echo "Frontend repo latest commit: $FRONTEND_SHA"
+
+ # 缓存前端下载 - key包含前端仓库的commit SHA
+ - name: Cache Frontend
+ id: cache-frontend
+ uses: actions/cache@v4
+ with:
+ path: public/dist
+ key: frontend-${{ steps.frontend-sha.outputs.repo }}-${{ steps.frontend-sha.outputs.sha }}
+ restore-keys: |
+ frontend-${{ steps.frontend-sha.outputs.repo }}-
+ # 即使只构建 x64,我们也需要 musl 工具链(因为 BuildDockerMultiplatform 默认会检查它)
- name: Cache Musl
id: cache-musl
uses: actions/cache@v4
@@ -51,11 +82,12 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- - name: Build go binary (beta)
+ - name: Build go binary
+ # 这里还是跑 docker-multiplatform,虽然会多编译一些架构,但这是兼容 Dockerfile 路径最稳妥的方法
run: bash build.sh beta docker-multiplatform
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- FRONTEND_REPO: ${{ vars.FRONTEND_REPO }}
+ # FRONTEND_REPO 使用 build.sh 默认值 (OpenListTeam/OpenList-Frontend)
- name: Upload artifacts
uses: actions/upload-artifact@v4
@@ -69,12 +101,13 @@ jobs:
release_docker:
needs: build_binary
- name: Release Docker image (Beta)
+ name: Release Docker (x64)
runs-on: ubuntu-latest
permissions:
packages: write
strategy:
matrix:
+ # 构建所有变体
image: ["latest", "ffmpeg", "aria2", "aio"]
include:
- image: "latest"
@@ -102,49 +135,37 @@ jobs:
with:
name: ${{ env.ARTIFACT_NAME }}
path: 'build/'
-
- - name: Set up QEMU
- uses: docker/setup-qemu-action@v3
-
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
+ # 👇 只保留 GitHub 登录,删除了 DockerHub 登录
- name: Login to GitHub Container Registry
- if: env.IMAGE_PUSH == 'true'
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- - name: Login to DockerHub Container Registry
- if: env.IMAGE_PUSH == 'true'
- uses: docker/login-action@v3
- with:
- username: ${{ vars.DOCKERHUB_ORG_NAME_BACKUP || env.DOCKERHUB_ORG_NAME }}
- password: ${{ secrets.DOCKERHUB_TOKEN }}
-
- name: Docker meta
id: meta
uses: docker/metadata-action@v5
with:
images: |
${{ env.REGISTRY }}/${{ env.GHCR_ORG_NAME }}/${{ env.IMAGE_NAME }}
- ${{ env.DOCKERHUB_ORG_NAME }}/${{ env.IMAGE_NAME_DOCKERHUB }}
tags: ${{ env.IMAGE_TAGS_BETA }}
- flavor: |
- ${{ matrix.tag_favor }}
+ flavor: ${{ matrix.tag_favor }}
- name: Build and push
- id: docker_build
uses: docker/build-push-action@v6
with:
context: .
file: Dockerfile.ci
- push: ${{ env.IMAGE_PUSH == 'true' }}
+ push: true
build-args: |
BASE_IMAGE_TAG=${{ matrix.base_image_tag }}
${{ matrix.build_arg }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
platforms: ${{ env.RELEASE_PLATFORMS }}
+ cache-from: type=registry,ref=${{ env.REGISTRY }}/${{ env.GHCR_ORG_NAME }}/${{ env.IMAGE_NAME }}:buildcache-${{ matrix.image }}
+ cache-to: type=registry,ref=${{ env.REGISTRY }}/${{ env.GHCR_ORG_NAME }}/${{ env.IMAGE_NAME }}:buildcache-${{ matrix.image }},mode=max
diff --git a/.gitignore b/.gitignore
index 1d71f0d60..add6d56bb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -31,4 +31,5 @@ output/
/public/dist/*
/!public/dist/README.md
-.VSCodeCounter
\ No newline at end of file
+.VSCodeCounter
+nul
diff --git a/CLAUDE.md b/CLAUDE.md
new file mode 100644
index 000000000..c3ffdded0
--- /dev/null
+++ b/CLAUDE.md
@@ -0,0 +1,346 @@
+# CLAUDE.md
+
+This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
+
+## Core Development Principles
+
+1. **最小代码改动原则** (Minimum code changes): Make the smallest change necessary to achieve the goal
+2. **不缓存整个文件原则** (No full file caching for seekable streams): For SeekableStream, use RangeRead instead of caching entire file
+3. **必要情况下可以多遍上传原则** (Multi-pass upload when necessary): If rapid upload fails, fall back to normal upload
+
+## Build and Development Commands
+
+```bash
+# Development
+go run main.go # Run backend server (default port 5244)
+air # Hot reload during development (uses .air.toml)
+./build.sh dev # Build development version with frontend
+./build.sh release # Build release version
+
+# Testing
+go test ./... # Run all tests
+
+# Docker
+docker-compose up # Run with docker-compose
+docker build -f Dockerfile . # Build docker image
+```
+
+**Build Script Details** (`build.sh`):
+- Fetches frontend from OpenListTeam/OpenList-Frontend releases
+- Injects version info via ldflags: `-X "github.com/OpenListTeam/OpenList/v4/internal/conf.BuiltAt=$(date +'%F %T %z')"`
+- Supports `dev`, `beta`, and release builds
+- Downloads prebuilt frontend distribution automatically
+
+**Go Version**: Requires Go 1.23.4+
+
+## Architecture Overview
+
+### Driver System (Storage Abstraction)
+
+OpenList uses a **driver pattern** to support 70+ cloud storage providers. Each driver implements the core `Driver` interface.
+
+**Location**: `drivers/*/`
+
+**Core Interfaces** (`internal/driver/driver.go`):
+- `Reader`: List directories, generate download links (REQUIRED)
+- `Writer`: Upload, delete, move files (optional)
+- `ArchiveDriver`: Extract archives (optional)
+- `LinkCacheModeResolver`: Custom cache TTL strategies (optional)
+
+**Driver Registration Pattern**:
+```go
+// In drivers/your_driver/meta.go
+var config = driver.Config{
+ Name: "YourDriver",
+ LocalSort: false,
+ NoCache: false,
+ DefaultRoot: "/",
+}
+
+func init() {
+ op.RegisterDriver(func() driver.Driver {
+ return &YourDriver{}
+ })
+}
+```
+
+**Adding a New Driver**:
+1. Copy `drivers/template/` to `drivers/your_driver/`
+2. Implement `List()` and `Link()` methods (required)
+3. Define `Addition` struct with configuration fields using struct tags:
+ - `json:"field_name"` - JSON field name
+ - `type:"select"` - Input type (select, string, text, bool, number)
+ - `required:"true"` - Required field
+ - `options:"a,b,c"` - Dropdown options
+ - `default:"value"` - Default value
+4. Register driver in `init()` function
+
+**Example Driver Structure**:
+```go
+type YourDriver struct {
+ model.Storage
+ Addition
+ client *YourClient
+}
+
+func (d *YourDriver) Init(ctx context.Context) error {
+ // Initialize client, login, etc.
+}
+
+func (d *YourDriver) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
+ // Return list of files/folders
+}
+
+func (d *YourDriver) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
+ // Return download URL or RangeReader
+}
+```
+
+### Request Flow
+
+```
+HTTP Request (Gin Router)
+ ↓
+Middleware (Auth, CORS, Logging)
+ ↓
+Handler (server/handles/)
+ ↓
+fs.List/Get/Link (mount path → storage path conversion)
+ ↓
+op.List/Get/Link (caching, driver lookup)
+ ↓
+Driver.List/Link (storage-specific API calls)
+ ↓
+Response (JSON / Proxy / Redirect)
+```
+
+### Internal Package Structure
+
+| Package | Purpose |
+|---------|---------|
+| `bootstrap/` | Initialization sequence: config, DB, storages, servers |
+| `conf/` | Configuration management |
+| `db/` | Database models (SQLite/MySQL/Postgres) |
+| `driver/` | Driver interface definitions |
+| `fs/` | Mount path abstraction (converts `/mount/path` to storage + path) |
+| `op/` | Core operations with caching and driver management |
+| `stream/` | Streaming, range readers, link refresh, rate limiting |
+| `model/` | Data models (Obj, Link, Storage, User) |
+| `cache/` | Multi-level caching (directories, links, users, settings) |
+| `net/` | HTTP utilities, proxy config, download manager |
+
+### Link Generation and Caching
+
+**Link Types**:
+1. **Direct URL** (`link.URL`): Simple redirect to storage provider
+2. **RangeReader** (`link.RangeReader`): Custom streaming implementation
+3. **Refreshable Link** (`link.Refresher`): Auto-refresh on expiration
+
+**Cache System** (`internal/op/cache.go`):
+- **Directory Cache**: Stores file listings with configurable TTL
+- **Link Cache**: Stores download URLs (30min default)
+- **User Cache**: Authentication data (1hr default)
+- **Custom Policies**: Pattern-based TTL via `pattern:ttl` format
+
+**Cache Key Pattern**: `{storageMountPath}/{relativePath}`
+
+**Invalidation**: Recursive tree deletion for directory operations
+
+### Range Reader and Streaming
+
+**Location**: `internal/stream/`
+
+**Purpose**: Handle partial content requests (HTTP 206), multi-threaded downloads, and link refresh during streaming.
+
+**Key Components**:
+
+1. **RangeReaderIF**: Core interface for range-based reading
+ ```go
+ type RangeReaderIF interface {
+ RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error)
+ }
+ ```
+
+2. **RefreshableRangeReader**: Wraps RangeReader with automatic link refresh
+   - Detects expired links via error strings or HTTP status codes (all 4xx client errors, plus some 5xx — see `IsLinkExpiredError`)
+ - Calls `link.Refresher(ctx)` to get new link
+ - Resumes download from current byte position
+   - Refresh attempts are capped (`MAX_LINK_REFRESH_COUNT`, currently 50) to prevent infinite loops
+
+3. **Multi-threaded Downloader** (`internal/net/downloader.go`):
+ - Splits file into parts based on `Concurrency` and `PartSize`
+ - Downloads parts in parallel
+ - Assembles final stream
+
+**Stream Types and Reader Management**:
+
+⚠️ **CRITICAL**: SeekableStream.Reader must NEVER be created early!
+
+- **FileStream**: One-time sequential stream (e.g., HTTP body)
+ - `Reader` is set at creation and consumed sequentially
+ - Cannot be rewound or re-read
+
+- **SeekableStream**: Reusable stream with RangeRead capability
+ - Has `rangeReader` for creating new readers on-demand
+ - `Reader` should ONLY be created when actually needed for sequential reading
+ - **DO NOT create Reader early** - use lazy initialization via `generateReader()`
+
+**Common Pitfall - Early Reader Creation**:
+```go
+// ❌ WRONG: Creating Reader early
+if _, ok := rr.(*model.FileRangeReader); ok {
+ rc, _ := rr.RangeRead(ctx, http_range.Range{Length: -1})
+ fs.Reader = rc // This will be consumed by intermediate operations!
+}
+
+// ✅ CORRECT: Let generateReader() create it on-demand
+// Reader will be created only when Read() is called
+return &SeekableStream{FileStream: fs, rangeReader: rr}, nil
+```
+
+**Why This Matters**:
+- Hash calculation uses `StreamHashFile()` which reads the file via RangeRead
+- If Reader is created early, it may be at EOF when HTTP upload actually needs it
+- Result: `http: ContentLength=X with Body length 0` error
+
+**Hash Calculation for Uploads**:
+```go
+// For SeekableStream: Use RangeRead to avoid consuming Reader
+if _, ok := file.(*SeekableStream); ok {
+ hash, err = stream.StreamHashFile(file, utils.MD5, 40, &up)
+ // StreamHashFile uses RangeRead internally, Reader remains unused
+}
+
+// For FileStream: Must cache first, then calculate hash
+_, hash, err = stream.CacheFullAndHash(file, &up, utils.MD5)
+```
+
+**Link Refresh Pattern**:
+```go
+// In op.Link(), a refresher is automatically attached
+link.Refresher = func(refreshCtx context.Context) (*model.Link, model.Obj, error) {
+ // Get fresh link from storage driver
+ file, err := GetUnwrap(refreshCtx, storage, path)
+ newLink, err := storage.Link(refreshCtx, file, args)
+ return newLink, file, nil
+}
+
+// RefreshableRangeReader uses this during streaming
+if IsLinkExpiredError(err) && r.link.Refresher != nil {
+ newLink, _, err := r.link.Refresher(ctx)
+ // Resume from current position
+}
+```
+
+**Proxy Function** (`server/common/proxy.go`):
+
+Handles multiple scenarios:
+1. Multi-threaded download (`link.Concurrency > 0`)
+2. Direct RangeReader (`link.RangeReader != nil`)
+3. Refreshable link (`link.Refresher != nil`) ← Wraps with RefreshableRangeReader
+4. Transparent proxy (forwards to `link.URL`)
+
+### Startup Sequence
+
+**Location**: `internal/bootstrap/run.go`
+
+Order of initialization:
+1. `InitConfig()` - Load config, environment variables
+2. `Log()` - Initialize logging
+3. `InitDB()` - Connect to database
+4. `data.InitData()` - Initialize default data
+5. `LoadStorages()` - Load and initialize all storage drivers
+6. `InitTaskManager()` - Start background tasks
+7. `Start()` - Start HTTP/HTTPS/WebDAV/FTP/SFTP servers
+
+## Common Patterns
+
+### Error Handling
+
+Use custom errors from `internal/errs/`:
+- `errs.NotImplement` - Feature not implemented
+- `errs.ObjectNotFound` - File/folder not found
+- `errs.NotFolder` - Path is not a directory
+- `errs.StorageNotInit` - Storage driver not initialized
+
+**Link Expiry Detection**:
+```go
+// Checks error string for keywords: "expired", "invalid signature", "token expired"
+// Also checks HTTP status: 401, 403, 410, 500
+if stream.IsLinkExpiredError(err) {
+ // Refresh link
+}
+```
+
+### Saving Driver State
+
+When updating tokens or credentials:
+```go
+d.AccessToken = newToken
+op.MustSaveDriverStorage(d) // Persists to database
+```
+
+### Rate Limiting
+
+Use `rate.Limiter` for API rate limits:
+```go
+type YourDriver struct {
+ limiter *rate.Limiter
+}
+
+func (d *YourDriver) Init(ctx context.Context) error {
+ d.limiter = rate.NewLimiter(rate.Every(time.Second), 1) // 1 req/sec
+}
+
+func (d *YourDriver) List(...) {
+ d.limiter.Wait(ctx)
+ // Make API call
+}
+```
+
+### Context Cancellation
+
+Always respect context cancellation in long operations:
+```go
+select {
+case <-ctx.Done():
+ return nil, ctx.Err()
+default:
+ // Continue operation
+}
+```
+
+## Important Conventions
+
+**Naming**:
+- Drivers: lowercase with underscores (e.g., `baidu_netdisk`, `aliyundrive_open`)
+- Packages: lowercase (e.g., `internal/op`)
+- Interfaces: PascalCase with suffix (e.g., `Reader`, `Writer`)
+
+**Driver Configuration Fields**:
+- Use `driver.RootPath` or `driver.RootID` for root folder
+- Add `omitempty` to optional JSON fields
+- Use descriptive help text in struct tags
+
+**Retries and Timeouts**:
+- Use `github.com/avast/retry-go` for retry logic
+- Set reasonable timeouts on HTTP clients (default 30s in `base.RestyClient`)
+- For unstable APIs, implement exponential backoff
+
+**Logging**:
+- Use `logrus` via `log` package
+- Levels: `log.Debugf`, `log.Infof`, `log.Warnf`, `log.Errorf`
+- Include driver name in logs: `log.Infof("[driver_name] message")`
+
+## Project Context
+
+OpenList is a community-driven fork of AList, focused on:
+- Long-term governance and trust
+- Support for 70+ cloud storage providers
+- Web UI for file management
+- Multi-protocol support (HTTP, WebDAV, FTP, SFTP, S3)
+- Offline downloads (Aria2, Transmission)
+- Full-text search
+- Archive extraction
+
+**License**: AGPL-3.0
diff --git a/COMPATIBILITY_REPORT.md b/COMPATIBILITY_REPORT.md
new file mode 100644
index 000000000..0b8be3b52
--- /dev/null
+++ b/COMPATIBILITY_REPORT.md
@@ -0,0 +1,204 @@
+# Rebase兼容性分析报告
+
+## 提交概览
+共引入 **21个commits**,主要涉及以下模块:
+
+### 核心功能改动
+
+#### 1. **链接刷新机制** (`internal/stream/util.go`)
+**Commits**:
+- `4c33ffa4` feat(link): add link refresh capability for expired download links
+- `f38fe180` fix(stream): 修复链接过期检测逻辑,避免将上下文取消视为链接过期
+- `7cf362c6` fix(stream): 更新过期链接检查逻辑,支持所有4xx客户端错误
+- `03fbaf1c` refactor(stream): 移除过时的链接刷新逻辑,添加自愈读取器以处理0字节读取
+
+**核心代码**:
+```go
+// 新增常量
+MAX_LINK_REFRESH_COUNT = 50 // 链接最大刷新次数
+MAX_RANGE_READ_RETRY_COUNT = 5 // RangeRead重试次数(从3提升到5)
+
+// 新增函数
+IsLinkExpiredError(err error) bool // 判断是否为链接过期错误
+
+// 新增结构
+RefreshableRangeReader struct {
+ link *model.Link
+ size int64
+ innerReader model.RangeReaderIF
+ mu sync.Mutex
+ refreshCount int // 防止无限循环
+}
+
+selfHealingReadCloser struct {
+ // 检测0字节读取,自动刷新链接
+}
+```
+
+**功能说明**:
+1. **链接过期检测**: 识别多种云盘的过期错误(expired, token expired, access denied, 4xx状态码等)
+2. **自动刷新**: 检测到过期时自动调用Refresher获取新链接,最多刷新50次
+3. **自愈机制**: 处理某些云盘返回200但内容为空的情况(0字节读取检测)
+4. **并发安全**: 使用sync.Mutex保护共享状态
+5. **Context隔离**: 刷新时使用WithoutCancel避免用户取消操作影响刷新
+
+**潜在风险**:
+- ✅ Context.WithoutCancel需要Go 1.21+
+- ✅ 并发场景下的锁竞争
+- ✅ refreshCount可能在某些场景下不递增导致无限循环
+
+---
+
+#### 2. **目录预创建优化** (`internal/fs/copy_move.go`)
+**Commit**: `ce0da112` fix(copy_move): 将预创建子目录的深度从2级调整为1级
+
+**核心代码**:
+```go
+func (t *FileTransferTask) preCreateDirectoryTree(objs []model.Obj, dstBasePath string, maxDepth int) error {
+ // 第一轮:创建直接子目录
+ for _, obj := range objs {
+ if obj.IsDir() {
+ subdirPath := stdpath.Join(dstBasePath, obj.GetName())
+ op.MakeDir(t.Ctx(), t.DstStorage, subdirPath)
+ subdirs = append(subdirs, obj)
+ }
+ }
+
+ // 停止递归条件
+ if maxDepth <= 0 {
+ return nil
+ }
+
+ // 第二轮:递归创建嵌套目录
+ for _, subdir := range subdirs {
+ subObjs := op.List(...)
+ preCreateDirectoryTree(subObjs, subdirDstPath, maxDepth-1)
+ }
+}
+```
+
+**功能说明**:
+1. **深度控制**: 默认maxDepth=1,只预创建2级目录(当前+子级)
+2. **防止深度递归**: 避免在大型项目中递归过深导致栈溢出或性能问题
+3. **错误容忍**: MakeDir失败时继续处理其他目录
+4. **Context感知**: 每次循环检查ctx.Err()支持取消操作
+
+**潜在风险**:
+- ✅ op.MakeDir和op.List调用需要存储初始化
+- ✅ 大量目录时的性能问题
+- ✅ Context取消时的资源清理
+
+---
+
+#### 3. **网络优化** (`drivers/`, `internal/net/`)
+**Commits**:
+- `b9dafa65` feat(network): 增加对慢速网络的支持,调整超时和重试机制
+- `bce47884` fix(driver): 增加夸克分片大小调整逻辑,支持重试机制
+- `0b8471f6` feat(quark_open): 添加速率限制和重试逻辑
+
+**功能说明**:
+1. 提升RangeRead重试次数: 3 → 5
+2. 调整网络超时参数
+3. 添加分片上传重试逻辑
+
+---
+
+#### 4. **驱动修复**
+**Commits**:
+- `da2812c0` fix(google_drive): 更新Put方法以支持可重复读取流和不可重复读取流的MD5校验
+- `5a6bad90` feat(google_drive): 添加文件夹创建的锁机制和重试逻辑
+- `a54b2388` feat(google_drive): 添加处理重复文件名的功能
+- `9ef22ec9` fix(driver): fix file copy failure to 123pan due to incorrect etag
+- `0ead87ef` fix(alias): update storage retrieval method in listRoot function
+- `311f6246` fix: 修复500 panic和NaN问题
+
+---
+
+## 兼容性评估
+
+### ✅ 编译兼容性
+- 构建成功,无语法错误
+- 依赖版本无冲突
+
+### ✅ API兼容性
+- 新增函数不破坏现有接口
+- RefreshableRangeReader实现model.RangeReaderIF接口
+- 向后兼容旧代码
+
+### ⚠️ 运行时兼容性
+**需要验证的场景**:
+1. **并发安全**: RefreshableRangeReader的并发读取
+2. **资源泄漏**: Context取消时goroutine是否正确退出
+3. **边界条件**:
+ - refreshCount达到50次的行为
+ - 0字节读取检测的准确性
+ - maxDepth=0时的目录创建
+4. **错误处理**:
+ - nil Refresher时的处理
+ - 链接刷新失败时的回退机制
+5. **性能**:
+ - 大文件下载时的刷新开销
+ - 深层目录结构的预创建性能
+
+---
+
+## 测试需求
+
+### 必须测试的场景
+
+#### Stream包测试
+1. **IsLinkExpiredError准确性**
+ - 各种云盘的过期错误格式
+ - Context取消不应判断为过期
+ - HTTP 4xx/5xx的区分
+
+2. **RefreshableRangeReader可靠性**
+ - 正常读取流程
+ - 自动刷新触发和成功
+ - 达到最大刷新次数
+ - 并发读取安全性
+ - Context取消的正确处理
+
+3. **selfHealingReadCloser**
+ - 0字节读取检测
+ - 刷新重试机制
+ - 资源正确关闭
+
+#### FS包测试
+1. **preCreateDirectoryTree**
+ - 深度控制正确性(0, 1, 2级)
+ - 大量目录的性能
+ - Context取消的响应
+ - 错误容忍性
+
+---
+
+## 风险等级: **中等**
+
+**原因**:
+- ✅ 新功能设计合理,有明确的边界和错误处理
+- ⚠️ 并发场景需要充分测试
+- ⚠️ 链接刷新逻辑复杂,需要验证各种边界情况
+- ⚠️ 依赖op包的函数需要正确的初始化
+
+---
+
+## 推送建议: **通过测试后可推送**
+
+**前置条件**:
+1. 完成全面的单元测试(见下方测试代码)
+2. 验证并发安全性
+3. 确认Context取消不会导致资源泄漏
+4. 性能测试通过(大文件、深层目录)
+
+**建议测试命令**:
+```bash
+# 单元测试
+go test ./internal/stream ./internal/fs -v -count=1 -race
+
+# 压力测试
+go test ./internal/stream -run Stress -v -count=10
+
+# 完整测试套件
+go test ./... -short -count=1
+```
diff --git a/build.sh b/build.sh
index 26e5a301b..0e8f4b85d 100644
--- a/build.sh
+++ b/build.sh
@@ -186,8 +186,8 @@ BuildDockerMultiplatform() {
docker_lflags="--extldflags '-static -fpic' $ldflags"
export CGO_ENABLED=1
- OS_ARCHES=(linux-amd64 linux-arm64 linux-386 linux-riscv64 linux-ppc64le linux-loong64) ## Disable linux-s390x builds
- CGO_ARGS=(x86_64-linux-musl-gcc aarch64-linux-musl-gcc i486-linux-musl-gcc riscv64-linux-musl-gcc powerpc64le-linux-musl-gcc loongarch64-linux-musl-gcc) ## Disable s390x-linux-musl-gcc builds
+ OS_ARCHES=(linux-amd64) ## Disable linux-s390x builds
+ CGO_ARGS=(x86_64-linux-musl-gcc) ## Disable s390x-linux-musl-gcc builds
for i in "${!OS_ARCHES[@]}"; do
os_arch=${OS_ARCHES[$i]}
cgo_cc=${CGO_ARGS[$i]}
@@ -205,14 +205,14 @@ BuildDockerMultiplatform() {
GO_ARM=(6 7)
export GOOS=linux
export GOARCH=arm
- for i in "${!DOCKER_ARM_ARCHES[@]}"; do
- docker_arch=${DOCKER_ARM_ARCHES[$i]}
- cgo_cc=${CGO_ARGS[$i]}
- export GOARM=${GO_ARM[$i]}
- export CC=${cgo_cc}
- echo "building for $docker_arch"
- go build -o build/${docker_arch%%-*}/${docker_arch##*-}/"$appName" -ldflags="$docker_lflags" -tags=jsoniter .
- done
+ # for i in "${!DOCKER_ARM_ARCHES[@]}"; do
+ # docker_arch=${DOCKER_ARM_ARCHES[$i]}
+ # cgo_cc=${CGO_ARGS[$i]}
+ # export GOARM=${GO_ARM[$i]}
+ # export CC=${cgo_cc}
+ # echo "building for $docker_arch"
+ # go build -o build/${docker_arch%%-*}/${docker_arch##*-}/"$appName" -ldflags="$docker_lflags" -tags=jsoniter .
+ # done
}
BuildRelease() {
diff --git a/drivers/115/driver.go b/drivers/115/driver.go
index 162d835d0..d4f5741d0 100644
--- a/drivers/115/driver.go
+++ b/drivers/115/driver.go
@@ -68,8 +68,7 @@ func (d *Pan115) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
return nil, err
}
userAgent := args.Header.Get("User-Agent")
- downloadInfo, err := d.
- DownloadWithUA(file.(*FileObj).PickCode, userAgent)
+ downloadInfo, err := d.client.DownloadWithUA(file.(*FileObj).PickCode, userAgent)
if err != nil {
return nil, err
}
diff --git a/drivers/115/types.go b/drivers/115/types.go
index 28a8ced30..3477ffed0 100644
--- a/drivers/115/types.go
+++ b/drivers/115/types.go
@@ -22,6 +22,10 @@ func (f *FileObj) GetHash() utils.HashInfo {
return utils.NewHashInfo(utils.SHA1, f.Sha1)
}
+func (f *FileObj) Thumb() string {
+ return f.ThumbURL
+}
+
type UploadResult struct {
driver.BasicResp
Data struct {
diff --git a/drivers/115/util.go b/drivers/115/util.go
index b000436b2..7ae375b75 100644
--- a/drivers/115/util.go
+++ b/drivers/115/util.go
@@ -9,7 +9,6 @@ import (
"encoding/json"
"fmt"
"io"
- "net/http"
"net/url"
"strconv"
"strings"
@@ -22,11 +21,9 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
- "github.com/aliyun/aliyun-oss-go-sdk/oss"
-
cipher "github.com/SheltonZhu/115driver/pkg/crypto/ec115"
- crypto "github.com/SheltonZhu/115driver/pkg/crypto/m115"
driver115 "github.com/SheltonZhu/115driver/pkg/driver"
+ "github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/pkg/errors"
)
@@ -108,60 +105,6 @@ func (d *Pan115) getUA() string {
return fmt.Sprintf("Mozilla/5.0 115Browser/%s", appVer)
}
-func (d *Pan115) DownloadWithUA(pickCode, ua string) (*driver115.DownloadInfo, error) {
- key := crypto.GenerateKey()
- result := driver115.DownloadResp{}
- params, err := utils.Json.Marshal(map[string]string{"pick_code": pickCode})
- if err != nil {
- return nil, err
- }
-
- data := crypto.Encode(params, key)
-
- bodyReader := strings.NewReader(url.Values{"data": []string{data}}.Encode())
- reqUrl := fmt.Sprintf("%s?t=%s", driver115.AndroidApiDownloadGetUrl, driver115.Now().String())
- req, _ := http.NewRequest(http.MethodPost, reqUrl, bodyReader)
- req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
- req.Header.Set("Cookie", d.Cookie)
- req.Header.Set("User-Agent", ua)
-
- resp, err := d.client.Client.GetClient().Do(req)
- if err != nil {
- return nil, err
- }
- defer resp.Body.Close()
-
- body, err := io.ReadAll(resp.Body)
- if err != nil {
- return nil, err
- }
- if err := utils.Json.Unmarshal(body, &result); err != nil {
- return nil, err
- }
-
- if err = result.Err(string(body)); err != nil {
- return nil, err
- }
-
- b, err := crypto.Decode(string(result.EncodedData), key)
- if err != nil {
- return nil, err
- }
-
- downloadInfo := struct {
- Url string `json:"url"`
- }{}
- if err := utils.Json.Unmarshal(b, &downloadInfo); err != nil {
- return nil, err
- }
-
- info := &driver115.DownloadInfo{}
- info.PickCode = pickCode
- info.Header = resp.Request.Header
- info.Url.Url = downloadInfo.Url
- return info, nil
-}
-
func (c *Pan115) GenerateToken(fileID, preID, timeStamp, fileSize, signKey, signVal string) string {
userID := strconv.FormatInt(c.client.UserID, 10)
userIDMd5 := md5.Sum([]byte(userID))
@@ -309,7 +252,8 @@ func (c *Pan115) UploadByOSS(ctx context.Context, params *driver115.UploadOSSPar
// UploadByMultipart upload by mutipart blocks
func (d *Pan115) UploadByMultipart(ctx context.Context, params *driver115.UploadOSSParams, fileSize int64, s model.FileStreamer,
- dirID string, up driver.UpdateProgress, opts ...driver115.UploadMultipartOption) (*UploadResult, error) {
+ dirID string, up driver.UpdateProgress, opts ...driver115.UploadMultipartOption,
+) (*UploadResult, error) {
var (
chunks []oss.FileChunk
parts []oss.UploadPart
diff --git a/drivers/115_open/driver.go b/drivers/115_open/driver.go
index c1a855749..9033365a5 100644
--- a/drivers/115_open/driver.go
+++ b/drivers/115_open/driver.go
@@ -17,6 +17,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
+ log "github.com/sirupsen/logrus"
"golang.org/x/time/rate"
)
@@ -74,13 +75,20 @@ func (d *Open115) Drop(ctx context.Context) error {
}
func (d *Open115) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
+ start := time.Now()
+ log.Infof("[115] List request started for dir: %s (ID: %s)", dir.GetName(), dir.GetID())
+
var res []model.Obj
pageSize := int64(d.PageSize)
offset := int64(0)
+ pageCount := 0
+
for {
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
+
+ pageStart := time.Now()
resp, err := d.client.GetFiles(ctx, &sdk.GetFilesReq{
CID: dir.GetID(),
Limit: pageSize,
@@ -90,7 +98,12 @@ func (d *Open115) List(ctx context.Context, dir model.Obj, args model.ListArgs)
// Cur: 1,
ShowDir: true,
})
+ pageDuration := time.Since(pageStart)
+ pageCount++
+ log.Infof("[115] GetFiles page %d took: %v (offset=%d, limit=%d)", pageCount, pageDuration, offset, pageSize)
+
if err != nil {
+ log.Errorf("[115] GetFiles page %d failed after %v: %v", pageCount, pageDuration, err)
return nil, err
}
res = append(res, utils.MustSliceConvert(resp.Data, func(src sdk.GetFilesResp_File) model.Obj {
@@ -102,10 +115,17 @@ func (d *Open115) List(ctx context.Context, dir model.Obj, args model.ListArgs)
}
offset += pageSize
}
+
+ totalDuration := time.Since(start)
+ log.Infof("[115] List request completed in %v (%d pages, %d files)", totalDuration, pageCount, len(res))
+
return res, nil
}
func (d *Open115) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
+ start := time.Now()
+ log.Infof("[115] Link request started for file: %s", file.GetName())
+
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
@@ -121,14 +141,25 @@ func (d *Open115) Link(ctx context.Context, file model.Obj, args model.LinkArgs)
return nil, fmt.Errorf("can't convert obj")
}
pc := obj.Pc
+
+ apiStart := time.Now()
+ log.Infof("[115] Calling DownURL API...")
resp, err := d.client.DownURL(ctx, pc, ua)
+ apiDuration := time.Since(apiStart)
+ log.Infof("[115] DownURL API took: %v", apiDuration)
+
if err != nil {
+ log.Errorf("[115] DownURL API failed after %v: %v", apiDuration, err)
return nil, err
}
u, ok := resp[obj.GetID()]
if !ok {
return nil, fmt.Errorf("can't get link")
}
+
+ totalDuration := time.Since(start)
+ log.Infof("[115] Link request completed in %v (API: %v)", totalDuration, apiDuration)
+
return &model.Link{
URL: u.URL.URL,
Header: http.Header{
@@ -176,7 +207,7 @@ func (d *Open115) Rename(ctx context.Context, srcObj model.Obj, newName string)
}
_, err := d.client.UpdateFile(ctx, &sdk.UpdateFileReq{
FileID: srcObj.GetID(),
- FileNma: newName,
+ FileName: newName,
})
if err != nil {
return nil, err
@@ -226,27 +257,97 @@ func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
if err != nil {
return err
}
+
sha1 := file.GetHash().GetHash(utils.SHA1)
- if len(sha1) != utils.SHA1.Width {
- _, sha1, err = stream.CacheFullAndHash(file, &up, utils.SHA1)
+ sha1128k := file.GetHash().GetHash(utils.SHA1_128K)
+
+ // 检查是否是可重复读取的流
+ _, isSeekable := file.(*stream.SeekableStream)
+
+ // 如果有预计算的 hash,先尝试秒传
+ if len(sha1) == utils.SHA1.Width && len(sha1128k) == utils.SHA1_128K.Width {
+ resp, err := d.client.UploadInit(ctx, &sdk.UploadInitReq{
+ FileName: file.GetName(),
+ FileSize: file.GetSize(),
+ Target: dstDir.GetID(),
+ FileID: strings.ToUpper(sha1),
+ PreID: strings.ToUpper(sha1128k),
+ })
if err != nil {
return err
}
+ if resp.Status == 2 {
+ up(100)
+ return nil
+ }
+ // 秒传失败,继续后续流程
}
- const PreHashSize int64 = 128 * utils.KB
- hashSize := PreHashSize
- if file.GetSize() < PreHashSize {
- hashSize = file.GetSize()
- }
- reader, err := file.RangeRead(http_range.Range{Start: 0, Length: hashSize})
- if err != nil {
- return err
- }
- sha1128k, err := utils.HashReader(utils.SHA1, reader)
- if err != nil {
- return err
+
+ if isSeekable {
+ // 可重复读取的流,使用 RangeRead 计算 hash,不缓存
+ if len(sha1) != utils.SHA1.Width {
+ sha1, err = stream.StreamHashFile(file, utils.SHA1, 100, &up)
+ if err != nil {
+ return err
+ }
+ }
+ // 计算 sha1_128k(如果没有预计算)
+ if len(sha1128k) != utils.SHA1_128K.Width {
+ const PreHashSize int64 = 128 * utils.KB
+ hashSize := PreHashSize
+ if file.GetSize() < PreHashSize {
+ hashSize = file.GetSize()
+ }
+ reader, err := file.RangeRead(http_range.Range{Start: 0, Length: hashSize})
+ if err != nil {
+ return err
+ }
+ sha1128k, err = utils.HashReader(utils.SHA1, reader)
+ if err != nil {
+ return err
+ }
+ }
+ } else {
+ // 不可重复读取的流(如 HTTP body)
+ // 如果有预计算的 hash,上面已经尝试过秒传了
+ if len(sha1) == utils.SHA1.Width && len(sha1128k) == utils.SHA1_128K.Width {
+ // 秒传失败,需要缓存文件进行实际上传
+ _, err = file.CacheFullAndWriter(&up, nil)
+ if err != nil {
+ return err
+ }
+ } else {
+ // 没有预计算的 hash,缓存整个文件并计算
+ if len(sha1) != utils.SHA1.Width {
+ _, sha1, err = stream.CacheFullAndHash(file, &up, utils.SHA1)
+ if err != nil {
+ return err
+ }
+ } else if file.GetFile() == nil {
+ // 有 SHA1 但没有缓存,需要缓存以支持后续 RangeRead
+ _, err = file.CacheFullAndWriter(&up, nil)
+ if err != nil {
+ return err
+ }
+ }
+ // 计算 sha1_128k
+ const PreHashSize int64 = 128 * utils.KB
+ hashSize := PreHashSize
+ if file.GetSize() < PreHashSize {
+ hashSize = file.GetSize()
+ }
+ reader, err := file.RangeRead(http_range.Range{Start: 0, Length: hashSize})
+ if err != nil {
+ return err
+ }
+ sha1128k, err = utils.HashReader(utils.SHA1, reader)
+ if err != nil {
+ return err
+ }
+ }
}
- // 1. Init
+
+ // 1. Init(SeekableStream 或已缓存的 FileStream)
resp, err := d.client.UploadInit(ctx, &sdk.UploadInitReq{
FileName: file.GetName(),
FileSize: file.GetSize(),
@@ -272,11 +373,11 @@ func (d *Open115) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
if err != nil {
return err
}
- reader, err = file.RangeRead(http_range.Range{Start: start, Length: end - start + 1})
+ signReader, err := file.RangeRead(http_range.Range{Start: start, Length: end - start + 1})
if err != nil {
return err
}
- signVal, err := utils.HashReader(utils.SHA1, reader)
+ signVal, err := utils.HashReader(utils.SHA1, signReader)
if err != nil {
return err
}
@@ -319,10 +420,21 @@ func (d *Open115) DeleteOfflineTask(ctx context.Context, infoHash string, delete
}
func (d *Open115) OfflineList(ctx context.Context) (*sdk.OfflineTaskListResp, error) {
+ // 获取第一页
resp, err := d.client.OfflineTaskList(ctx, 1)
if err != nil {
return nil, err
}
+ // 如果有多页,获取所有页面的任务
+ if resp.PageCount > 1 {
+ for page := 2; page <= resp.PageCount; page++ {
+ pageResp, err := d.client.OfflineTaskList(ctx, int64(page))
+ if err != nil {
+ return nil, err
+ }
+ resp.Tasks = append(resp.Tasks, pageResp.Tasks...)
+ }
+ }
return resp, nil
}
diff --git a/drivers/115_open/upload.go b/drivers/115_open/upload.go
index 3575678c2..292c8371b 100644
--- a/drivers/115_open/upload.go
+++ b/drivers/115_open/upload.go
@@ -4,6 +4,7 @@ import (
"context"
"encoding/base64"
"io"
+ "strings"
"time"
sdk "github.com/OpenListTeam/115-sdk-go"
@@ -13,8 +14,19 @@ import (
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/avast/retry-go"
+ log "github.com/sirupsen/logrus"
)
+// isTokenExpiredError 检测是否为OSS凭证过期错误
+func isTokenExpiredError(err error) bool {
+ if err == nil {
+ return false
+ }
+ errStr := err.Error()
+ return strings.Contains(errStr, "SecurityTokenExpired") ||
+ strings.Contains(errStr, "InvalidAccessKeyId")
+}
+
func calPartSize(fileSize int64) int64 {
var partSize int64 = 20 * utils.MB
if fileSize > partSize {
@@ -70,11 +82,16 @@ func (d *Open115) singleUpload(ctx context.Context, tempF model.File, tokenResp
// }
func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer, up driver.UpdateProgress, tokenResp *sdk.UploadGetTokenResp, initResp *sdk.UploadInitResp) error {
- ossClient, err := oss.New(tokenResp.Endpoint, tokenResp.AccessKeyId, tokenResp.AccessKeySecret, oss.SecurityToken(tokenResp.SecurityToken))
- if err != nil {
- return err
+ // 创建OSS客户端的辅助函数
+ createBucket := func(token *sdk.UploadGetTokenResp) (*oss.Bucket, error) {
+ ossClient, err := oss.New(token.Endpoint, token.AccessKeyId, token.AccessKeySecret, oss.SecurityToken(token.SecurityToken))
+ if err != nil {
+ return nil, err
+ }
+ return ossClient.Bucket(initResp.Bucket)
}
- bucket, err := ossClient.Bucket(initResp.Bucket)
+
+ bucket, err := createBucket(tokenResp)
if err != nil {
return err
}
@@ -119,7 +136,24 @@ func (d *Open115) multpartUpload(ctx context.Context, stream model.FileStreamer,
retry.Context(ctx),
retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
- retry.Delay(time.Second))
+ retry.Delay(time.Second),
+ retry.OnRetry(func(n uint, err error) {
+ // 如果是凭证过期错误,在重试前刷新凭证并重建bucket
+ if isTokenExpiredError(err) {
+ log.Warnf("115 OSS token expired, refreshing token...")
+ if newToken, refreshErr := d.client.UploadGetToken(ctx); refreshErr == nil {
+ tokenResp = newToken
+ if newBucket, bucketErr := createBucket(tokenResp); bucketErr == nil {
+ bucket = newBucket
+ log.Infof("115 OSS token refreshed successfully")
+ } else {
+ log.Errorf("Failed to create new bucket with refreshed token: %v", bucketErr)
+ }
+ } else {
+ log.Errorf("Failed to refresh 115 OSS token: %v", refreshErr)
+ }
+ }
+ }))
ss.FreeSectionReader(rd)
if err != nil {
return err
diff --git a/drivers/115_share/driver.go b/drivers/115_share/driver.go
index 00fa623e6..fe8b7733a 100644
--- a/drivers/115_share/driver.go
+++ b/drivers/115_share/driver.go
@@ -3,6 +3,7 @@ package _115_share
import (
"context"
+ "github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
@@ -49,9 +50,16 @@ func (d *Pan115Share) List(ctx context.Context, dir model.Obj, args model.ListAr
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
-
+ var ua string
+ // TODO: will use user agent from header
+ // if args.Header != nil {
+ // ua = args.Header.Get("User-Agent")
+ // }
+ if ua == "" {
+ ua = base.UserAgentNT
+ }
files := make([]driver115.ShareFile, 0)
- fileResp, err := d.client.GetShareSnap(d.ShareCode, d.ReceiveCode, dir.GetID(), driver115.QueryLimit(int(d.PageSize)))
+ fileResp, err := d.client.GetShareSnapWithUA(ua, d.ShareCode, d.ReceiveCode, dir.GetID(), driver115.QueryLimit(int(d.PageSize)))
if err != nil {
return nil, err
}
@@ -77,7 +85,14 @@ func (d *Pan115Share) Link(ctx context.Context, file model.Obj, args model.LinkA
if err := d.WaitLimit(ctx); err != nil {
return nil, err
}
- downloadInfo, err := d.client.DownloadByShareCode(d.ShareCode, d.ReceiveCode, file.GetID())
+ var ua string
+ if args.Header != nil {
+ ua = args.Header.Get("User-Agent")
+ }
+ if ua == "" {
+ ua = base.UserAgent
+ }
+ downloadInfo, err := d.client.DownloadByShareCodeWithUA(ua, d.ShareCode, d.ReceiveCode, file.GetID())
if err != nil {
return nil, err
}
diff --git a/drivers/115_share/utils.go b/drivers/115_share/utils.go
index 082d9d462..f9575d493 100644
--- a/drivers/115_share/utils.go
+++ b/drivers/115_share/utils.go
@@ -5,6 +5,7 @@ import (
"strconv"
"time"
+ "github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
driver115 "github.com/SheltonZhu/115driver/pkg/driver"
@@ -20,6 +21,7 @@ type FileObj struct {
FileName string
isDir bool
FileID string
+ ThumbURL string
}
func (f *FileObj) CreateTime() time.Time {
@@ -54,6 +56,10 @@ func (f *FileObj) GetPath() string {
return ""
}
+func (f *FileObj) Thumb() string {
+ return f.ThumbURL
+}
+
func transFunc(sf driver115.ShareFile) (model.Obj, error) {
timeInt, err := strconv.ParseInt(sf.UpdateTime, 10, 64)
if err != nil {
@@ -74,15 +80,14 @@ func transFunc(sf driver115.ShareFile) (model.Obj, error) {
FileName: string(sf.FileName),
isDir: isDir,
FileID: fileID,
+ ThumbURL: sf.ThumbURL,
}, nil
}
-var UserAgent = driver115.UA115Browser
-
func (d *Pan115Share) login() error {
var err error
opts := []driver115.Option{
- driver115.UA(UserAgent),
+ driver115.UA(base.UserAgentNT),
}
d.client = driver115.New(opts...)
if _, err := d.client.GetShareSnap(d.ShareCode, d.ReceiveCode, ""); err != nil {
diff --git a/drivers/123_open/driver.go b/drivers/123_open/driver.go
index 9608cedf9..15daf30cf 100644
--- a/drivers/123_open/driver.go
+++ b/drivers/123_open/driver.go
@@ -175,20 +175,71 @@ func (d *Open123) Remove(ctx context.Context, obj model.Obj) error {
}
func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) {
- // 1. 创建文件
+ // 1. 准备参数
// parentFileID 父目录id,上传到根目录时填写 0
parentFileId, err := strconv.ParseInt(dstDir.GetID(), 10, 64)
if err != nil {
return nil, fmt.Errorf("parse parentFileID error: %v", err)
}
+
+ // 尝试 SHA1 秒传
+ sha1Hash := file.GetHash().GetHash(utils.SHA1)
+ if len(sha1Hash) == utils.SHA1.Width {
+ resp, err := d.sha1Reuse(parentFileId, file.GetName(), sha1Hash, file.GetSize(), 2)
+ if err == nil && resp.Data.Reuse {
+ return File{
+ FileName: file.GetName(),
+ Size: file.GetSize(),
+ FileId: resp.Data.FileID,
+ Type: 2,
+ SHA1: sha1Hash,
+ }, nil
+ }
+ }
+
+
// etag 文件md5
etag := file.GetHash().GetHash(utils.MD5)
- if len(etag) < utils.MD5.Width {
+
+ // 检查是否是可重复读取的流
+ _, isSeekable := file.(*stream.SeekableStream)
+
+ // 如果有预计算的 hash,先尝试秒传
+ if len(etag) >= utils.MD5.Width {
+ createResp, err := d.create(parentFileId, file.GetName(), etag, file.GetSize(), 2, false)
+ if err != nil {
+ return nil, err
+ }
+ if createResp.Data.Reuse && createResp.Data.FileID != 0 {
+ return File{
+ FileName: file.GetName(),
+ Size: file.GetSize(),
+ FileId: createResp.Data.FileID,
+ Type: 2,
+ Etag: etag,
+ }, nil
+ }
+ // 秒传失败,继续后续流程
+ }
+
+ if isSeekable {
+ // 可重复读取的流,使用 RangeRead 计算 hash,不缓存
+ if len(etag) < utils.MD5.Width {
+ etag, err = stream.StreamHashFile(file, utils.MD5, 40, &up)
+ if err != nil {
+ return nil, err
+ }
+ }
+ } else {
+ // 不可重复读取的流(如 HTTP body)
+ // 秒传失败或没有 hash,缓存整个文件并计算 MD5
_, etag, err = stream.CacheFullAndHash(file, &up, utils.MD5)
if err != nil {
return nil, err
}
}
+
+ // 2. 创建上传任务(或再次尝试秒传)
createResp, err := d.create(parentFileId, file.GetName(), etag, file.GetSize(), 2, false)
if err != nil {
return nil, err
@@ -207,13 +258,16 @@ func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStre
}
}
- // 2. 上传分片
- err = d.Upload(ctx, file, createResp, up)
+ // 3. 上传分片
+ uploadProgress := func(p float64) {
+ up(40 + p*0.6)
+ }
+ err = d.Upload(ctx, file, createResp, uploadProgress)
if err != nil {
return nil, err
}
- // 3. 上传完毕
+ // 4. 合并分片/完成上传
for range 60 {
uploadCompleteResp, err := d.complete(createResp.Data.PreuploadID)
// 返回错误代码未知,如:20103,文档也没有具体说
diff --git a/drivers/123_open/types.go b/drivers/123_open/types.go
index 8745ff795..7d586c8b0 100644
--- a/drivers/123_open/types.go
+++ b/drivers/123_open/types.go
@@ -58,9 +58,13 @@ type File struct {
Category int `json:"category"`
Status int `json:"status"`
Trashed int `json:"trashed"`
+ SHA1 string
}
func (f File) GetHash() utils.HashInfo {
+ if len(f.SHA1) == utils.SHA1.Width && len(f.Etag) != utils.MD5.Width {
+ return utils.NewHashInfo(utils.SHA1, f.SHA1)
+ }
return utils.NewHashInfo(utils.MD5, f.Etag)
}
@@ -190,6 +194,14 @@ type UploadCompleteResp struct {
} `json:"data"`
}
+type SHA1ReuseResp struct {
+ BaseResp
+ Data struct {
+ FileID int64 `json:"fileID"`
+ Reuse bool `json:"reuse"`
+ } `json:"data"`
+}
+
type OfflineDownloadResp struct {
BaseResp
Data struct {
diff --git a/drivers/123_open/upload.go b/drivers/123_open/upload.go
index 90cff90d7..0e03684e9 100644
--- a/drivers/123_open/upload.go
+++ b/drivers/123_open/upload.go
@@ -183,3 +183,21 @@ func (d *Open123) complete(preuploadID string) (*UploadCompleteResp, error) {
}
return &resp, nil
}
+
+// SHA1 秒传
+func (d *Open123) sha1Reuse(parentFileID int64, filename string, sha1Hash string, size int64, duplicate int) (*SHA1ReuseResp, error) {
+ var resp SHA1ReuseResp
+ _, err := d.Request(UploadSHA1Reuse, http.MethodPost, func(req *resty.Request) {
+ req.SetBody(base.Json{
+ "parentFileID": parentFileID,
+ "filename": filename,
+ "sha1": strings.ToLower(sha1Hash),
+ "size": size,
+ "duplicate": duplicate,
+ })
+ }, &resp)
+ if err != nil {
+ return nil, err
+ }
+ return &resp, nil
+}
diff --git a/drivers/123_open/util.go b/drivers/123_open/util.go
index 5d961d5c2..1b6eea2da 100644
--- a/drivers/123_open/util.go
+++ b/drivers/123_open/util.go
@@ -21,16 +21,17 @@ import (
var ( // 不同情况下获取的AccessTokenQPS限制不同 如下模块化易于拓展
Api = "https://open-api.123pan.com"
- UserInfo = InitApiInfo(Api+"/api/v1/user/info", 1)
- FileList = InitApiInfo(Api+"/api/v2/file/list", 3)
- DownloadInfo = InitApiInfo(Api+"/api/v1/file/download_info", 5)
- DirectLink = InitApiInfo(Api+"/api/v1/direct-link/url", 5)
- Mkdir = InitApiInfo(Api+"/upload/v1/file/mkdir", 2)
- Move = InitApiInfo(Api+"/api/v1/file/move", 1)
- Rename = InitApiInfo(Api+"/api/v1/file/name", 1)
- Trash = InitApiInfo(Api+"/api/v1/file/trash", 2)
- UploadCreate = InitApiInfo(Api+"/upload/v2/file/create", 2)
- UploadComplete = InitApiInfo(Api+"/upload/v2/file/upload_complete", 0)
+ UserInfo = InitApiInfo(Api+"/api/v1/user/info", 1)
+ FileList = InitApiInfo(Api+"/api/v2/file/list", 3)
+ DownloadInfo = InitApiInfo(Api+"/api/v1/file/download_info", 5)
+ DirectLink = InitApiInfo(Api+"/api/v1/direct-link/url", 5)
+ Mkdir = InitApiInfo(Api+"/upload/v1/file/mkdir", 2)
+ Move = InitApiInfo(Api+"/api/v1/file/move", 1)
+ Rename = InitApiInfo(Api+"/api/v1/file/name", 1)
+ Trash = InitApiInfo(Api+"/api/v1/file/trash", 2)
+ UploadCreate = InitApiInfo(Api+"/upload/v2/file/create", 2)
+ UploadComplete = InitApiInfo(Api+"/upload/v2/file/upload_complete", 0)
+ UploadSHA1Reuse = InitApiInfo(Api+"/upload/v2/file/sha1_reuse", 2)
OfflineDownload = InitApiInfo(Api+"/api/v1/offline/download", 1)
OfflineDownloadProcess = InitApiInfo(Api+"/api/v1/offline/download/process", 5)
diff --git a/drivers/alias/driver.go b/drivers/alias/driver.go
index 64376957e..e1ba41eb6 100644
--- a/drivers/alias/driver.go
+++ b/drivers/alias/driver.go
@@ -229,6 +229,15 @@ func (d *Alias) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([
for _, obj := range objMap {
objs = append(objs, obj)
}
+ if d.OrderBy == "" {
+ sort := getAllSort(dirs)
+ if sort.OrderBy != "" {
+ model.SortFiles(objs, sort.OrderBy, sort.OrderDirection)
+ }
+ if d.ExtractFolder == "" && sort.ExtractFolder != "" {
+ model.ExtractFolder(objs, sort.ExtractFolder)
+ }
+ }
return objs, nil
}
@@ -276,21 +285,38 @@ func (d *Alias) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (
}, nil
}
- reqPath := d.getBalancedPath(ctx, file)
- link, fi, err := d.link(ctx, reqPath, args)
+ var link *model.Link
+ var fi model.Obj
+ var err error
+ files := file.(BalancedObjs)
+ if d.ReadConflictPolicy == RandomBalancedRP || d.ReadConflictPolicy == AllRWP {
+ rand.Shuffle(len(files), func(i, j int) {
+ files[i], files[j] = files[j], files[i]
+ })
+ }
+ for _, f := range files {
+ if f == nil {
+ continue
+ }
+ link, fi, err = d.link(ctx, f.GetPath(), args)
+ if err == nil {
+ if link == nil {
+ // 重定向且需要通过代理
+ return &model.Link{
+ URL: fmt.Sprintf("%s/p%s?sign=%s",
+ common.GetApiUrl(ctx),
+ utils.EncodePath(f.GetPath(), true),
+ sign.Sign(f.GetPath())),
+ }, nil
+ }
+ break
+ }
+ }
if err != nil {
return nil, err
}
- if link == nil {
- // 重定向且需要通过代理
- return &model.Link{
- URL: fmt.Sprintf("%s/p%s?sign=%s",
- common.GetApiUrl(ctx),
- utils.EncodePath(reqPath, true),
- sign.Sign(reqPath)),
- }, nil
- }
resultLink := *link // 复制一份,避免修改到原始link
+ resultLink.Expiration = nil
resultLink.SyncClosers = utils.NewSyncClosers(link)
if args.Redirect {
return &resultLink, nil
diff --git a/drivers/alias/util.go b/drivers/alias/util.go
index 7336b9ba9..b37854394 100644
--- a/drivers/alias/util.go
+++ b/drivers/alias/util.go
@@ -40,7 +40,7 @@ func (d *Alias) listRoot(ctx context.Context, withDetails, refresh bool) []model
if !withDetails || len(v) != 1 {
continue
}
- remoteDriver, err := op.GetStorageByMountPath(v[0])
+ remoteDriver, err := fs.GetStorage(v[0], &fs.GetStoragesArgs{})
if err != nil {
continue
}
@@ -490,3 +490,43 @@ func (d *Alias) extract(ctx context.Context, reqPath string, args model.ArchiveI
link, _, err := op.DriverExtract(ctx, storage, reqActualPath, args)
return link, err
}
+
+func getAllSort(dirs []model.Obj) model.Sort {
+ ret := model.Sort{}
+ noSort := false
+ noExtractFolder := false
+ for _, dir := range dirs {
+ if dir == nil {
+ continue
+ }
+ storage, err := fs.GetStorage(dir.GetPath(), &fs.GetStoragesArgs{})
+ if err != nil {
+ continue
+ }
+ if !noSort && storage.GetStorage().OrderBy != "" {
+ if ret.OrderBy == "" {
+ ret.OrderBy = storage.GetStorage().OrderBy
+ ret.OrderDirection = storage.GetStorage().OrderDirection
+ if ret.OrderDirection == "" {
+ ret.OrderDirection = "asc"
+ }
+ } else if ret.OrderBy != storage.GetStorage().OrderBy || ret.OrderDirection != storage.GetStorage().OrderDirection {
+ ret.OrderBy = ""
+ ret.OrderDirection = ""
+ noSort = true
+ }
+ }
+ if !noExtractFolder && storage.GetStorage().ExtractFolder != "" {
+ if ret.ExtractFolder == "" {
+ ret.ExtractFolder = storage.GetStorage().ExtractFolder
+ } else if ret.ExtractFolder != storage.GetStorage().ExtractFolder {
+ ret.ExtractFolder = ""
+ noExtractFolder = true
+ }
+ }
+ if noSort && noExtractFolder {
+ break
+ }
+ }
+ return ret
+}
diff --git a/drivers/aliyundrive_open/upload.go b/drivers/aliyundrive_open/upload.go
index a4a6c1de1..00c806e5f 100644
--- a/drivers/aliyundrive_open/upload.go
+++ b/drivers/aliyundrive_open/upload.go
@@ -163,21 +163,29 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
}
count := int(math.Ceil(float64(stream.GetSize()) / float64(partSize)))
createData["part_info_list"] = makePartInfos(count)
+
+ // 检查是否是可重复读取的流
+ _, isSeekable := stream.(*streamPkg.SeekableStream)
+
// rapid upload
rapidUpload := !stream.IsForceStreamUpload() && stream.GetSize() > 100*utils.KB && d.RapidUpload
if rapidUpload {
log.Debugf("[aliyundrive_open] start cal pre_hash")
- // read 1024 bytes to calculate pre hash
- reader, err := stream.RangeRead(http_range.Range{Start: 0, Length: 1024})
- if err != nil {
- return nil, err
- }
- hash, err := utils.HashReader(utils.SHA1, reader)
- if err != nil {
- return nil, err
+ // 优先使用预计算的 pre_hash
+ preHash := stream.GetHash().GetHash(utils.PRE_HASH)
+ if len(preHash) != utils.PRE_HASH.Width {
+ // 没有预计算的 pre_hash,使用 RangeRead 计算
+ reader, err := stream.RangeRead(http_range.Range{Start: 0, Length: 1024})
+ if err != nil {
+ return nil, err
+ }
+ preHash, err = utils.HashReader(utils.SHA1, reader)
+ if err != nil {
+ return nil, err
+ }
}
createData["size"] = stream.GetSize()
- createData["pre_hash"] = hash
+ createData["pre_hash"] = preHash
}
var createResp CreateResp
_, err, e := d.requestReturnErrResp(ctx, limiterOther, "/adrive/v1.0/openFile/create", http.MethodPost, func(req *resty.Request) {
@@ -191,9 +199,18 @@ func (d *AliyundriveOpen) upload(ctx context.Context, dstDir model.Obj, stream m
hash := stream.GetHash().GetHash(utils.SHA1)
if len(hash) != utils.SHA1.Width {
- _, hash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA1)
- if err != nil {
- return nil, err
+ if isSeekable {
+ // 可重复读取的流,使用 StreamHashFile(RangeRead),不缓存
+ hash, err = streamPkg.StreamHashFile(stream, utils.SHA1, 50, &up)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ // 不可重复读取的流,缓存并计算
+ _, hash, err = streamPkg.CacheFullAndHash(stream, &up, utils.SHA1)
+ if err != nil {
+ return nil, err
+ }
}
}
diff --git a/drivers/baidu_netdisk/driver.go b/drivers/baidu_netdisk/driver.go
index fe77aca38..474dd2b98 100644
--- a/drivers/baidu_netdisk/driver.go
+++ b/drivers/baidu_netdisk/driver.go
@@ -1,30 +1,18 @@
package baidu_netdisk
import (
- "bytes"
"context"
- "crypto/md5"
- "encoding/hex"
"errors"
- "io"
- "mime/multipart"
- "net/http"
"net/url"
- "os"
stdpath "path"
"strconv"
- "strings"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
- "github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
- "github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
- "github.com/OpenListTeam/OpenList/v4/internal/net"
- "github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
+ streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
- "github.com/avast/retry-go"
log "github.com/sirupsen/logrus"
)
@@ -37,6 +25,7 @@ type BaiduNetdisk struct {
}
var ErrUploadIDExpired = errors.New("uploadid expired")
+var ErrUploadURLExpired = errors.New("upload url expired or unavailable")
func (d *BaiduNetdisk) Config() driver.Config {
return config
@@ -199,80 +188,26 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
return newObj, nil
}
- var (
- cache = stream.GetFile()
- tmpF *os.File
- err error
- )
- if cache == nil {
- tmpF, err = os.CreateTemp(conf.Conf.TempDir, "file-*")
- if err != nil {
- return nil, err
- }
- defer func() {
- _ = tmpF.Close()
- _ = os.Remove(tmpF.Name())
- }()
- cache = tmpF
- }
-
streamSize := stream.GetSize()
sliceSize := d.getSliceSize(streamSize)
count := 1
if streamSize > sliceSize {
count = int((streamSize + sliceSize - 1) / sliceSize)
}
- lastBlockSize := streamSize % sliceSize
- if lastBlockSize == 0 {
- lastBlockSize = sliceSize
- }
-
- // cal md5 for first 256k data
- const SliceSize int64 = 256 * utils.KB
- blockList := make([]string, 0, count)
- byteSize := sliceSize
- fileMd5H := md5.New()
- sliceMd5H := md5.New()
- sliceMd5H2 := md5.New()
- slicemd5H2Write := utils.LimitWriter(sliceMd5H2, SliceSize)
- writers := []io.Writer{fileMd5H, sliceMd5H, slicemd5H2Write}
- if tmpF != nil {
- writers = append(writers, tmpF)
- }
- written := int64(0)
- for i := 1; i <= count; i++ {
- if utils.IsCanceled(ctx) {
- return nil, ctx.Err()
- }
- if i == count {
- byteSize = lastBlockSize
- }
- n, err := utils.CopyWithBufferN(io.MultiWriter(writers...), stream, byteSize)
- written += n
- if err != nil && err != io.EOF {
- return nil, err
- }
- blockList = append(blockList, hex.EncodeToString(sliceMd5H.Sum(nil)))
- sliceMd5H.Reset()
- }
- if tmpF != nil {
- if written != streamSize {
- return nil, errs.NewErr(err, "CreateTempFile failed, size mismatch: %d != %d ", written, streamSize)
- }
- _, err = tmpF.Seek(0, io.SeekStart)
- if err != nil {
- return nil, errs.NewErr(err, "CreateTempFile failed, can't seek to 0 ")
- }
- }
- contentMd5 := hex.EncodeToString(fileMd5H.Sum(nil))
- sliceMd5 := hex.EncodeToString(sliceMd5H2.Sum(nil))
- blockListStr, _ := utils.Json.MarshalToString(blockList)
path := stdpath.Join(dstDir.GetPath(), stream.GetName())
mtime := stream.ModTime().Unix()
ctime := stream.CreateTime().Unix()
- // step.1 尝试读取已保存进度
+ // step.1 流式计算MD5哈希值(使用 RangeRead,不会消耗流)
+ contentMd5, sliceMd5, blockList, err := d.calculateHashesStream(ctx, stream, sliceSize, &up)
+ if err != nil {
+ return nil, err
+ }
+
+ blockListStr, _ := utils.Json.MarshalToString(blockList)
+
+ // step.2 尝试读取已保存进度或执行预上传
precreateResp, ok := base.GetUploadProgress[*PrecreateResp](d, d.AccessToken, contentMd5)
if !ok {
// 没有进度,走预上传
@@ -288,6 +223,7 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
return fileToObj(precreateResp.File), nil
}
}
+
ensureUploadURL := func() {
if precreateResp.UploadURL != "" {
return
@@ -295,58 +231,20 @@ func (d *BaiduNetdisk) Put(ctx context.Context, dstDir model.Obj, stream model.F
precreateResp.UploadURL = d.getUploadUrl(path, precreateResp.Uploadid)
}
- // step.2 上传分片
+ // step.3 流式上传分片
+ // 创建 StreamSectionReader 用于上传
+ ss, err := streamPkg.NewStreamSectionReader(stream, int(sliceSize), &up)
+ if err != nil {
+ return nil, err
+ }
+
uploadLoop:
for range 2 {
// 获取上传域名
ensureUploadURL()
- // 并发上传
- threadG, upCtx := errgroup.NewGroupWithContext(ctx, d.uploadThread,
- retry.Attempts(UPLOAD_RETRY_COUNT),
- retry.Delay(UPLOAD_RETRY_WAIT_TIME),
- retry.MaxDelay(UPLOAD_RETRY_MAX_WAIT_TIME),
- retry.DelayType(retry.BackOffDelay),
- retry.RetryIf(func(err error) bool {
- return !errors.Is(err, ErrUploadIDExpired)
- }),
- retry.LastErrorOnly(true))
-
- totalParts := len(precreateResp.BlockList)
-
- for i, partseq := range precreateResp.BlockList {
- if utils.IsCanceled(upCtx) {
- break
- }
- if partseq < 0 {
- continue
- }
- i, partseq := i, partseq
- offset, size := int64(partseq)*sliceSize, sliceSize
- if partseq+1 == count {
- size = lastBlockSize
- }
- threadG.Go(func(ctx context.Context) error {
- params := map[string]string{
- "method": "upload",
- "access_token": d.AccessToken,
- "type": "tmpfile",
- "path": path,
- "uploadid": precreateResp.Uploadid,
- "partseq": strconv.Itoa(partseq),
- }
- section := io.NewSectionReader(cache, offset, size)
- err := d.uploadSlice(ctx, precreateResp.UploadURL, params, stream.GetName(), section)
- if err != nil {
- return err
- }
- precreateResp.BlockList[i] = -1
- progress := float64(threadG.Success()+1) * 100 / float64(totalParts+1)
- up(progress)
- return nil
- })
- }
- err = threadG.Wait()
+ // 流式并发上传
+ err = d.uploadChunksStream(ctx, ss, stream, precreateResp, path, sliceSize, count, up)
if err == nil {
break uploadLoop
}
@@ -372,13 +270,19 @@ uploadLoop:
precreateResp.UploadURL = ""
// 覆盖掉旧的进度
base.SaveUploadProgress(d, precreateResp, d.AccessToken, contentMd5)
+
+ // 尝试重新创建 StreamSectionReader(如果流支持重新读取)
+ ss, err = streamPkg.NewStreamSectionReader(stream, int(sliceSize), &up)
+ if err != nil {
+ return nil, err
+ }
continue uploadLoop
}
return nil, err
}
defer up(100)
- // step.3 创建文件
+ // step.4 创建文件
var newFile File
_, err = d.create(path, streamSize, 0, precreateResp.Uploadid, blockListStr, &newFile, mtime, ctime)
if err != nil {
@@ -427,68 +331,6 @@ func (d *BaiduNetdisk) precreate(ctx context.Context, path string, streamSize in
return &precreateResp, nil
}
-func (d *BaiduNetdisk) uploadSlice(ctx context.Context, uploadUrl string, params map[string]string, fileName string, file *io.SectionReader) error {
- b := bytes.NewBuffer(make([]byte, 0, bytes.MinRead))
- mw := multipart.NewWriter(b)
- _, err := mw.CreateFormFile("file", fileName)
- if err != nil {
- return err
- }
- headSize := b.Len()
- err = mw.Close()
- if err != nil {
- return err
- }
- head := bytes.NewReader(b.Bytes()[:headSize])
- tail := bytes.NewReader(b.Bytes()[headSize:])
- rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.MultiReader(head, file, tail))
-
- req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadUrl+"/rest/2.0/pcs/superfile2", rateLimitedRd)
- if err != nil {
- return err
- }
- query := req.URL.Query()
- for k, v := range params {
- query.Set(k, v)
- }
- req.URL.RawQuery = query.Encode()
- req.Header.Set("Content-Type", mw.FormDataContentType())
- req.ContentLength = int64(b.Len()) + file.Size()
-
- client := net.NewHttpClient()
- if d.UploadSliceTimeout > 0 {
- client.Timeout = time.Second * time.Duration(d.UploadSliceTimeout)
- } else {
- client.Timeout = DEFAULT_UPLOAD_SLICE_TIMEOUT
- }
- resp, err := client.Do(req)
- if err != nil {
- return err
- }
- defer resp.Body.Close()
- b.Reset()
- _, err = b.ReadFrom(resp.Body)
- if err != nil {
- return err
- }
- body := b.Bytes()
- respStr := string(body)
- log.Debugln(respStr)
- lower := strings.ToLower(respStr)
- // 合并 uploadid 过期检测逻辑
- if strings.Contains(lower, "uploadid") &&
- (strings.Contains(lower, "invalid") || strings.Contains(lower, "expired") || strings.Contains(lower, "not found")) {
- return ErrUploadIDExpired
- }
-
- errCode := utils.Json.Get(body, "error_code").ToInt()
- errNo := utils.Json.Get(body, "errno").ToInt()
- if errCode != 0 || errNo != 0 {
- return errs.NewErr(errs.StreamIncomplete, "error uploading to baidu, response=%s", respStr)
- }
- return nil
-}
-
func (d *BaiduNetdisk) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
du, err := d.quota(ctx)
if err != nil {
diff --git a/drivers/baidu_netdisk/meta.go b/drivers/baidu_netdisk/meta.go
index 3f3bed022..499fcd8a8 100644
--- a/drivers/baidu_netdisk/meta.go
+++ b/drivers/baidu_netdisk/meta.go
@@ -31,8 +31,8 @@ type Addition struct {
const (
UPLOAD_FALLBACK_API = "https://d.pcs.baidu.com" // 备用上传地址
UPLOAD_URL_EXPIRE_TIME = time.Minute * 60 // 上传地址有效期(分钟)
- DEFAULT_UPLOAD_SLICE_TIMEOUT = time.Second * 60 // 上传分片请求默认超时时间
- UPLOAD_RETRY_COUNT = 3
+ DEFAULT_UPLOAD_SLICE_TIMEOUT = time.Second * 180 // 上传分片请求默认超时时间(增加到3分钟以应对慢速网络)
+ UPLOAD_RETRY_COUNT = 5 // 增加重试次数以提高成功率
UPLOAD_RETRY_WAIT_TIME = time.Second * 1
UPLOAD_RETRY_MAX_WAIT_TIME = time.Second * 5
)
diff --git a/drivers/baidu_netdisk/upload.go b/drivers/baidu_netdisk/upload.go
new file mode 100644
index 000000000..c160c3a9e
--- /dev/null
+++ b/drivers/baidu_netdisk/upload.go
@@ -0,0 +1,311 @@
+package baidu_netdisk
+
+import (
+ "bytes"
+ "context"
+ "crypto/md5"
+ "encoding/hex"
+ "errors"
+ "io"
+ "mime/multipart"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/OpenListTeam/OpenList/v4/internal/driver"
+ "github.com/OpenListTeam/OpenList/v4/internal/errs"
+ "github.com/OpenListTeam/OpenList/v4/internal/model"
+ "github.com/OpenListTeam/OpenList/v4/internal/net"
+ streamPkg "github.com/OpenListTeam/OpenList/v4/internal/stream"
+ "github.com/OpenListTeam/OpenList/v4/pkg/errgroup"
+ "github.com/OpenListTeam/OpenList/v4/pkg/utils"
+ "github.com/avast/retry-go"
+ log "github.com/sirupsen/logrus"
+)
+
+// calculateHashesStream 流式计算文件的MD5哈希值
+// 返回:文件MD5、前256KB的MD5、每个分片的MD5列表
+// 注意:此函数使用 RangeRead 读取数据,不会消耗流
+func (d *BaiduNetdisk) calculateHashesStream(
+ ctx context.Context,
+ stream model.FileStreamer,
+ sliceSize int64,
+ up *driver.UpdateProgress,
+) (contentMd5 string, sliceMd5 string, blockList []string, err error) {
+ streamSize := stream.GetSize()
+ count := 1
+ if streamSize > sliceSize {
+ count = int((streamSize + sliceSize - 1) / sliceSize)
+ }
+ lastBlockSize := streamSize % sliceSize
+ if lastBlockSize == 0 {
+ lastBlockSize = sliceSize
+ }
+
+ // 前256KB的MD5
+ const SliceSize int64 = 256 * utils.KB
+ blockList = make([]string, 0, count)
+ fileMd5H := md5.New()
+ sliceMd5H2 := md5.New()
+ sliceWritten := int64(0)
+
+ // 使用固定大小的缓冲区进行流式哈希计算
+ // 这样可以利用 ReadFullWithRangeRead 的链接刷新逻辑
+ const chunkSize = 10 * 1024 * 1024 // 10MB per chunk
+ buf := make([]byte, chunkSize)
+
+ for i := 0; i < count; i++ {
+ if utils.IsCanceled(ctx) {
+ return "", "", nil, ctx.Err()
+ }
+
+ offset := int64(i) * sliceSize
+ length := sliceSize
+ if i == count-1 {
+ length = lastBlockSize
+ }
+
+ // 计算分片MD5
+ sliceMd5Calc := md5.New()
+
+ // 分块读取并计算哈希
+ var sliceOffset int64 = 0
+ for sliceOffset < length {
+ readSize := chunkSize
+ if length-sliceOffset < int64(chunkSize) {
+ readSize = int(length - sliceOffset)
+ }
+
+ // 使用 ReadFullWithRangeRead 读取数据,自动处理链接刷新
+ n, err := streamPkg.ReadFullWithRangeRead(stream, buf[:readSize], offset+sliceOffset)
+ if err != nil {
+ return "", "", nil, err
+ }
+
+ // 同时写入多个哈希计算器
+ fileMd5H.Write(buf[:n])
+ sliceMd5Calc.Write(buf[:n])
+ if sliceWritten < SliceSize {
+ remaining := SliceSize - sliceWritten
+ if int64(n) > remaining {
+ sliceMd5H2.Write(buf[:remaining])
+ sliceWritten += remaining
+ } else {
+ sliceMd5H2.Write(buf[:n])
+ sliceWritten += int64(n)
+ }
+ }
+
+ sliceOffset += int64(n)
+ }
+
+ blockList = append(blockList, hex.EncodeToString(sliceMd5Calc.Sum(nil)))
+
+ // 更新进度(哈希计算占总进度的一小部分)
+ if up != nil {
+ progress := float64(i+1) * 10 / float64(count)
+ (*up)(progress)
+ }
+ }
+
+ return hex.EncodeToString(fileMd5H.Sum(nil)),
+ hex.EncodeToString(sliceMd5H2.Sum(nil)),
+ blockList, nil
+}
+
+// uploadChunksStream 流式上传所有分片
+func (d *BaiduNetdisk) uploadChunksStream(
+ ctx context.Context,
+ ss streamPkg.StreamSectionReaderIF,
+ stream model.FileStreamer,
+ precreateResp *PrecreateResp,
+ path string,
+ sliceSize int64,
+ count int,
+ up driver.UpdateProgress,
+) error {
+ streamSize := stream.GetSize()
+ lastBlockSize := streamSize % sliceSize
+ if lastBlockSize == 0 {
+ lastBlockSize = sliceSize
+ }
+
+ // 使用 OrderedGroup 保证 Before 阶段有序
+ thread := min(d.uploadThread, len(precreateResp.BlockList))
+ threadG, upCtx := errgroup.NewOrderedGroupWithContext(ctx, thread,
+ retry.Attempts(UPLOAD_RETRY_COUNT),
+ retry.Delay(UPLOAD_RETRY_WAIT_TIME),
+ retry.MaxDelay(UPLOAD_RETRY_MAX_WAIT_TIME),
+ retry.DelayType(retry.BackOffDelay),
+ retry.RetryIf(func(err error) bool {
+ return !errors.Is(err, ErrUploadIDExpired)
+ }),
+ retry.OnRetry(func(n uint, err error) {
+ // 重试前检测是否需要刷新上传 URL
+ if errors.Is(err, ErrUploadURLExpired) {
+ log.Infof("[baidu_netdisk] refreshing upload URL due to error: %v", err)
+ precreateResp.UploadURL = d.getUploadUrl(path, precreateResp.Uploadid)
+ }
+ }),
+ retry.LastErrorOnly(true))
+
+ totalParts := len(precreateResp.BlockList)
+
+ for i, partseq := range precreateResp.BlockList {
+ if utils.IsCanceled(upCtx) {
+ break
+ }
+ if partseq < 0 {
+ continue
+ }
+
+ i, partseq := i, partseq
+ offset := int64(partseq) * sliceSize
+ size := sliceSize
+ if partseq+1 == count {
+ size = lastBlockSize
+ }
+
+ var reader io.ReadSeeker
+
+ threadG.GoWithLifecycle(errgroup.Lifecycle{
+ Before: func(ctx context.Context) error {
+ var err error
+ reader, err = ss.GetSectionReader(offset, size)
+ return err
+ },
+ Do: func(ctx context.Context) error {
+ reader.Seek(0, io.SeekStart)
+ err := d.uploadSliceStream(ctx, precreateResp.UploadURL, path,
+ precreateResp.Uploadid, partseq, stream.GetName(), reader, size)
+ if err != nil {
+ return err
+ }
+ precreateResp.BlockList[i] = -1
+ // 进度从10%开始(前10%是哈希计算)
+ progress := 10 + float64(threadG.Success()+1)*90/float64(totalParts+1)
+ up(progress)
+ return nil
+ },
+ After: func(err error) {
+ ss.FreeSectionReader(reader)
+ },
+ })
+ }
+
+ return threadG.Wait()
+}
+
+// uploadSliceStream 上传单个分片(接受io.ReadSeeker)
+func (d *BaiduNetdisk) uploadSliceStream(
+ ctx context.Context,
+ uploadUrl string,
+ path string,
+ uploadid string,
+ partseq int,
+ fileName string,
+ reader io.ReadSeeker,
+ size int64,
+) error {
+ params := map[string]string{
+ "method": "upload",
+ "access_token": d.AccessToken,
+ "type": "tmpfile",
+ "path": path,
+ "uploadid": uploadid,
+ "partseq": strconv.Itoa(partseq),
+ }
+
+ b := bytes.NewBuffer(make([]byte, 0, bytes.MinRead))
+ mw := multipart.NewWriter(b)
+ _, err := mw.CreateFormFile("file", fileName)
+ if err != nil {
+ return err
+ }
+ headSize := b.Len()
+ err = mw.Close()
+ if err != nil {
+ return err
+ }
+ head := bytes.NewReader(b.Bytes()[:headSize])
+ tail := bytes.NewReader(b.Bytes()[headSize:])
+ rateLimitedRd := driver.NewLimitedUploadStream(ctx, io.MultiReader(head, reader, tail))
+
+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, uploadUrl+"/rest/2.0/pcs/superfile2", rateLimitedRd)
+ if err != nil {
+ return err
+ }
+ query := req.URL.Query()
+ for k, v := range params {
+ query.Set(k, v)
+ }
+ req.URL.RawQuery = query.Encode()
+ req.Header.Set("Content-Type", mw.FormDataContentType())
+ req.ContentLength = int64(b.Len()) + size
+
+ client := net.NewHttpClient()
+ if d.UploadSliceTimeout > 0 {
+ client.Timeout = time.Second * time.Duration(d.UploadSliceTimeout)
+ } else {
+ client.Timeout = DEFAULT_UPLOAD_SLICE_TIMEOUT
+ }
+ resp, err := client.Do(req)
+ if err != nil {
+ // 检测超时或网络错误,标记需要刷新上传 URL
+ if isUploadURLError(err) {
+ log.Warnf("[baidu_netdisk] upload slice failed with network error: %v, will refresh upload URL", err)
+ return errors.Join(err, ErrUploadURLExpired)
+ }
+ return err
+ }
+ defer resp.Body.Close()
+ b.Reset()
+ _, err = b.ReadFrom(resp.Body)
+ if err != nil {
+ return err
+ }
+ body := b.Bytes()
+ respStr := string(body)
+ log.Debugln(respStr)
+ lower := strings.ToLower(respStr)
+ // 合并 uploadid 过期检测逻辑
+ if strings.Contains(lower, "uploadid") &&
+ (strings.Contains(lower, "invalid") || strings.Contains(lower, "expired") || strings.Contains(lower, "not found")) {
+ return ErrUploadIDExpired
+ }
+
+ errCode := utils.Json.Get(body, "error_code").ToInt()
+ errNo := utils.Json.Get(body, "errno").ToInt()
+ if errCode != 0 || errNo != 0 {
+ return errs.NewErr(errs.StreamIncomplete, "error uploading to baidu, response=%s", respStr)
+ }
+ return nil
+}
+
+// isUploadURLError 判断是否为需要刷新上传 URL 的错误
+// 包括:超时、连接被拒绝、连接重置、DNS 解析失败等网络错误
+func isUploadURLError(err error) bool {
+ if err == nil {
+ return false
+ }
+ errStr := strings.ToLower(err.Error())
+ // 超时错误
+ if strings.Contains(errStr, "timeout") ||
+ strings.Contains(errStr, "deadline exceeded") {
+ return true
+ }
+ // 连接错误
+ if strings.Contains(errStr, "connection refused") ||
+ strings.Contains(errStr, "connection reset") ||
+ strings.Contains(errStr, "no such host") ||
+ strings.Contains(errStr, "network is unreachable") {
+ return true
+ }
+ // EOF 错误(连接被服务器关闭)
+ if strings.Contains(errStr, "eof") ||
+ strings.Contains(errStr, "broken pipe") {
+ return true
+ }
+ return false
+}
diff --git a/drivers/baidu_netdisk/util.go b/drivers/baidu_netdisk/util.go
index 0e27fb305..75018a708 100644
--- a/drivers/baidu_netdisk/util.go
+++ b/drivers/baidu_netdisk/util.go
@@ -207,7 +207,24 @@ func (d *BaiduNetdisk) linkOfficial(file model.Obj, _ model.LinkArgs) (*model.Li
return nil, err
}
u := fmt.Sprintf("%s&access_token=%s", resp.List[0].Dlink, d.AccessToken)
- res, err := base.NoRedirectClient.R().SetHeader("User-Agent", "pan.baidu.com").Head(u)
+
+ // Retry HEAD request with longer timeout to avoid client-side errors
+ // Create a client with longer timeout (base.NoRedirectClient doesn't have timeout set)
+ client := base.NoRedirectClient.SetTimeout(60 * time.Second)
+ var res *resty.Response
+ maxRetries := 5
+ for i := 0; i < maxRetries; i++ {
+ res, err = client.R().
+ SetHeader("User-Agent", "pan.baidu.com").
+ Head(u)
+ if err == nil {
+ break
+ }
+ if i < maxRetries-1 {
+ log.Warnf("HEAD request failed (attempt %d/%d): %v, retrying...", i+1, maxRetries, err)
+ time.Sleep(time.Duration(i+1) * 2 * time.Second) // Linear backoff: 2s, 4s, 6s, 8s
+ }
+ }
if err != nil {
return nil, err
}
diff --git a/drivers/cloudreve_v4/driver.go b/drivers/cloudreve_v4/driver.go
index a1d301635..cd5cf1b3b 100644
--- a/drivers/cloudreve_v4/driver.go
+++ b/drivers/cloudreve_v4/driver.go
@@ -129,15 +129,7 @@ func (d *CloudreveV4) List(ctx context.Context, dir model.Obj, args model.ListAr
}
}
return &model.ObjThumb{
- Object: model.Object{
- ID: src.ID,
- Path: src.Path,
- Name: src.Name,
- Size: src.Size,
- Modified: src.UpdatedAt,
- Ctime: src.CreatedAt,
- IsFolder: src.Type == 1,
- },
+ Object: *fileToObject(&src),
Thumbnail: thumb,
}, nil
})
@@ -151,14 +143,7 @@ func (d *CloudreveV4) Get(ctx context.Context, path string) (model.Obj, error) {
if err != nil {
return nil, err
}
- return &model.Object{
- ID: info.ID,
- Path: info.Path,
- Name: info.Name,
- Size: info.Size,
- Modified: info.UpdatedAt,
- Ctime: info.CreatedAt,
- }, nil
+ return fileToObject(&info), nil
}
func (d *CloudreveV4) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
diff --git a/drivers/cloudreve_v4/types.go b/drivers/cloudreve_v4/types.go
index 23335042f..b67cfc862 100644
--- a/drivers/cloudreve_v4/types.go
+++ b/drivers/cloudreve_v4/types.go
@@ -122,6 +122,18 @@ type File struct {
PrimaryEntity string `json:"primary_entity"`
}
+func fileToObject(f *File) *model.Object {
+ return &model.Object{
+ ID: f.ID,
+ Path: f.Path,
+ Name: f.Name,
+ Size: f.Size,
+ Modified: f.UpdatedAt,
+ Ctime: f.CreatedAt,
+ IsFolder: f.Type == 1,
+ }
+}
+
type StoragePolicy struct {
ID string `json:"id"`
Name string `json:"name"`
diff --git a/drivers/cloudreve_v4/util.go b/drivers/cloudreve_v4/util.go
index 853df9ad6..f8fe5f269 100644
--- a/drivers/cloudreve_v4/util.go
+++ b/drivers/cloudreve_v4/util.go
@@ -16,6 +16,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
+ "github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/setting"
@@ -30,6 +31,7 @@ import (
const (
CodeLoginRequired = http.StatusUnauthorized
+ CodePathNotExist = 40016 // Path not exist
CodeCredentialInvalid = 40020 // Failed to issue token
)
@@ -101,6 +103,9 @@ func (d *CloudreveV4) _request(method string, path string, callback base.ReqCall
if r.Code == CodeCredentialInvalid {
return ErrorIssueToken
}
+ if r.Code == CodePathNotExist {
+ return errs.ObjectNotFound
+ }
return fmt.Errorf("%d: %s", r.Code, r.Msg)
}
diff --git a/drivers/google_drive/driver.go b/drivers/google_drive/driver.go
index 94ef854f2..61c182bfb 100644
--- a/drivers/google_drive/driver.go
+++ b/drivers/google_drive/driver.go
@@ -5,15 +5,26 @@ import (
"fmt"
"net/http"
"strconv"
+ "strings"
+ "sync"
+ "time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
+ "github.com/OpenListTeam/OpenList/v4/internal/stream"
+ "github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
+ "github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
+ log "github.com/sirupsen/logrus"
)
+// mkdirLocks prevents race conditions when creating folders with the same name
+// Google Drive allows duplicate folder names, so we need application-level locking
+var mkdirLocks sync.Map // map[string]*sync.Mutex - key is parentID + "/" + dirName
+
type GoogleDrive struct {
model.Storage
Addition
@@ -67,15 +78,76 @@ func (d *GoogleDrive) Link(ctx context.Context, file model.Obj, args model.LinkA
}
func (d *GoogleDrive) MakeDir(ctx context.Context, parentDir model.Obj, dirName string) error {
+ // Use per-folder lock to prevent concurrent creation of same folder
+ // This is critical because Google Drive allows duplicate folder names
+ lockKey := parentDir.GetID() + "/" + dirName
+ lockVal, _ := mkdirLocks.LoadOrStore(lockKey, &sync.Mutex{})
+ lock := lockVal.(*sync.Mutex)
+ lock.Lock()
+ defer lock.Unlock()
+
+ // Check if folder already exists with retry to handle API eventual consistency
+ escapedDirName := strings.ReplaceAll(dirName, "'", "\\'")
+ query := map[string]string{
+ "q": fmt.Sprintf("name='%s' and '%s' in parents and mimeType='application/vnd.google-apps.folder' and trashed=false", escapedDirName, parentDir.GetID()),
+ "fields": "files(id)",
+ }
+
+ var existingFiles Files
+ err := retry.Do(func() error {
+ var checkErr error
+ _, checkErr = d.request("https://www.googleapis.com/drive/v3/files", http.MethodGet, func(req *resty.Request) {
+ req.SetQueryParams(query)
+ }, &existingFiles)
+ return checkErr
+ },
+ retry.Context(ctx),
+ retry.Attempts(3),
+ retry.DelayType(retry.BackOffDelay),
+ retry.Delay(200*time.Millisecond),
+ )
+
+ // If query succeeded and folder exists, return success (idempotent)
+ if err == nil && len(existingFiles.Files) > 0 {
+ log.Debugf("[google_drive] Folder '%s' already exists in parent %s, skipping creation", dirName, parentDir.GetID())
+ return nil
+ }
+ // If query failed, return error to prevent duplicate creation
+ if err != nil {
+ return fmt.Errorf("failed to check existing folder '%s': %w", dirName, err)
+ }
+
+ // Create new folder (only when confirmed folder doesn't exist)
data := base.Json{
"name": dirName,
"parents": []string{parentDir.GetID()},
"mimeType": "application/vnd.google-apps.folder",
}
- _, err := d.request("https://www.googleapis.com/drive/v3/files", http.MethodPost, func(req *resty.Request) {
- req.SetBody(data)
- }, nil)
- return err
+
+ var createErr error
+ err = retry.Do(func() error {
+ _, createErr = d.request("https://www.googleapis.com/drive/v3/files", http.MethodPost, func(req *resty.Request) {
+ req.SetBody(data)
+ }, nil)
+ return createErr
+ },
+ retry.Context(ctx),
+ retry.Attempts(3),
+ retry.DelayType(retry.BackOffDelay),
+ retry.Delay(500*time.Millisecond),
+ )
+
+ if err != nil {
+ return err
+ }
+
+ // Wait for API eventual consistency before releasing lock
+ // This helps prevent race conditions where a concurrent request
+ // checks for folder existence before the newly created folder is visible
+ // 500ms is needed because Google Drive API has significant sync delay
+ time.Sleep(500 * time.Millisecond)
+
+ return nil
}
func (d *GoogleDrive) Move(ctx context.Context, srcObj, dstDir model.Obj) error {
@@ -111,8 +183,44 @@ func (d *GoogleDrive) Remove(ctx context.Context, obj model.Obj) error {
return err
}
-func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
- obj := stream.GetExist()
+func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) error {
+ // 1. 准备MD5(用于完整性校验)
+ md5Hash := file.GetHash().GetHash(utils.MD5)
+
+ // 检查是否是可重复读取的流
+ _, isSeekable := file.(*stream.SeekableStream)
+
+ if isSeekable {
+ // 可重复读取的流,使用 RangeRead 计算 hash,不缓存
+ if len(md5Hash) != utils.MD5.Width {
+ var err error
+ md5Hash, err = stream.StreamHashFile(file, utils.MD5, 10, &up)
+ if err != nil {
+ return err
+ }
+ _ = md5Hash // MD5用于后续完整性校验(Google Drive会自动校验)
+ }
+ } else {
+ // 不可重复读取的流(如 HTTP body)
+ if len(md5Hash) != utils.MD5.Width {
+ // 缓存整个文件并计算 MD5
+ var err error
+ _, md5Hash, err = stream.CacheFullAndHash(file, &up, utils.MD5)
+ if err != nil {
+ return err
+ }
+ _ = md5Hash // MD5用于后续完整性校验
+ } else if file.GetFile() == nil {
+ // 有 MD5 但没有缓存,需要缓存以支持后续 RangeRead
+ _, err := file.CacheFullAndWriter(&up, nil)
+ if err != nil {
+ return err
+ }
+ }
+ }
+
+ // 2. 初始化可恢复上传会话
+ obj := file.GetExist()
var (
e Error
url string
@@ -125,7 +233,7 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
data = base.Json{}
} else {
data = base.Json{
- "name": stream.GetName(),
+ "name": file.GetName(),
"parents": []string{dstDir.GetID()},
}
url = "https://www.googleapis.com/upload/drive/v3/files?uploadType=resumable&supportsAllDrives=true"
@@ -133,8 +241,8 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
req := base.NoRedirectClient.R().
SetHeaders(map[string]string{
"Authorization": "Bearer " + d.AccessToken,
- "X-Upload-Content-Type": stream.GetMimetype(),
- "X-Upload-Content-Length": strconv.FormatInt(stream.GetSize(), 10),
+ "X-Upload-Content-Type": file.GetMimetype(),
+ "X-Upload-Content-Length": strconv.FormatInt(file.GetSize(), 10),
}).
SetError(&e).SetBody(data).SetContext(ctx)
if obj != nil {
@@ -151,20 +259,29 @@ func (d *GoogleDrive) Put(ctx context.Context, dstDir model.Obj, stream model.Fi
if err != nil {
return err
}
- return d.Put(ctx, dstDir, stream, up)
+ return d.Put(ctx, dstDir, file, up)
}
return fmt.Errorf("%s: %v", e.Error.Message, e.Error.Errors)
}
+
+ // 3. 上传文件内容
putUrl := res.Header().Get("location")
- if stream.GetSize() < d.ChunkSize*1024*1024 {
+ if file.GetSize() < d.ChunkSize*1024*1024 {
+ // 小文件上传:使用 RangeRead 读取整个文件(避免消费已计算hash的stream)
+ reader, err := file.RangeRead(http_range.Range{Start: 0, Length: file.GetSize()})
+ if err != nil {
+ return err
+ }
+
_, err = d.request(putUrl, http.MethodPut, func(req *resty.Request) {
- req.SetHeader("Content-Length", strconv.FormatInt(stream.GetSize(), 10)).
- SetBody(driver.NewLimitedUploadStream(ctx, stream))
+ req.SetHeader("Content-Length", strconv.FormatInt(file.GetSize(), 10)).
+ SetBody(driver.NewLimitedUploadStream(ctx, reader))
}, nil)
+ return err
} else {
- err = d.chunkUpload(ctx, stream, putUrl, up)
+ // 大文件分片上传
+ return d.chunkUpload(ctx, file, putUrl, up)
}
- return err
}
func (d *GoogleDrive) GetDetails(ctx context.Context) (*model.StorageDetails, error) {
diff --git a/drivers/google_drive/util.go b/drivers/google_drive/util.go
index 042abafa4..1fc68c335 100644
--- a/drivers/google_drive/util.go
+++ b/drivers/google_drive/util.go
@@ -296,9 +296,60 @@ func (d *GoogleDrive) getFiles(id string) ([]File, error) {
res = append(res, resp.Files...)
}
+
+ // Handle duplicate filenames by adding suffixes like (1), (2), etc.
+ // Google Drive allows multiple files with the same name in one folder,
+ // but OpenList uses path-based file system which requires unique names
+ res = handleDuplicateNames(res)
+
return res, nil
}
+// handleDuplicateNames adds suffixes to duplicate filenames to make them unique
+// For example: file.txt, file (1).txt, file (2).txt
+func handleDuplicateNames(files []File) []File {
+ if len(files) <= 1 {
+ return files
+ }
+
+ // Track how many files with each name we've seen
+ nameCount := make(map[string]int)
+
+ // First pass: count occurrences of each name
+ for _, file := range files {
+ nameCount[file.Name]++
+ }
+
+ // Second pass: add suffixes to duplicates
+ nameIndex := make(map[string]int)
+ for i := range files {
+ name := files[i].Name
+ if nameCount[name] > 1 {
+ index := nameIndex[name]
+ nameIndex[name]++
+
+ if index > 0 {
+ // Add suffix for all except the first occurrence
+ // Split name into base and extension
+ ext := ""
+ base := name
+ for j := len(name) - 1; j >= 0; j-- {
+ if name[j] == '.' {
+ ext = name[j:]
+ base = name[:j]
+ break
+ }
+ }
+
+ // Add (1), (2), etc. suffix
+ files[i].Name = fmt.Sprintf("%s (%d)%s", base, index, ext)
+ }
+ }
+ }
+
+ return files
+}
+
// getTargetFileInfo gets target file details for shortcuts
func (d *GoogleDrive) getTargetFileInfo(targetId string) (File, error) {
var targetFile File
diff --git a/drivers/openlist/driver.go b/drivers/openlist/driver.go
index 2ca60ff61..9b69bbeb5 100644
--- a/drivers/openlist/driver.go
+++ b/drivers/openlist/driver.go
@@ -14,6 +14,8 @@ import (
"github.com/OpenListTeam/OpenList/v4/internal/driver"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
+ "github.com/OpenListTeam/OpenList/v4/internal/stream"
+ "github.com/OpenListTeam/OpenList/v4/pkg/http_range"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server/common"
"github.com/go-resty/resty/v2"
@@ -195,6 +197,92 @@ func (d *OpenList) Remove(ctx context.Context, obj model.Obj) error {
}
func (d *OpenList) Put(ctx context.Context, dstDir model.Obj, s model.FileStreamer, up driver.UpdateProgress) error {
+ // 预计算 hash(如果不存在),使用 RangeRead 不消耗 Reader
+ // 这样远端驱动不需要再计算,避免 HTTP body 被重复读取
+ md5Hash := s.GetHash().GetHash(utils.MD5)
+ sha1Hash := s.GetHash().GetHash(utils.SHA1)
+ sha256Hash := s.GetHash().GetHash(utils.SHA256)
+ sha1_128kHash := s.GetHash().GetHash(utils.SHA1_128K)
+ preHash := s.GetHash().GetHash(utils.PRE_HASH)
+
+ // 计算所有缺失的 hash,确保最大兼容性
+ if len(md5Hash) != utils.MD5.Width {
+ var err error
+ md5Hash, err = stream.StreamHashFile(s, utils.MD5, 33, &up)
+ if err != nil {
+ log.Warnf("[openlist] failed to pre-calculate MD5: %v", err)
+ md5Hash = ""
+ }
+ }
+ if len(sha1Hash) != utils.SHA1.Width {
+ var err error
+ sha1Hash, err = stream.StreamHashFile(s, utils.SHA1, 33, &up)
+ if err != nil {
+ log.Warnf("[openlist] failed to pre-calculate SHA1: %v", err)
+ sha1Hash = ""
+ }
+ }
+ if len(sha256Hash) != utils.SHA256.Width {
+ var err error
+ sha256Hash, err = stream.StreamHashFile(s, utils.SHA256, 34, &up)
+ if err != nil {
+ log.Warnf("[openlist] failed to pre-calculate SHA256: %v", err)
+ sha256Hash = ""
+ }
+ }
+
+ // 计算特殊 hash(用于秒传验证)
+ // SHA1_128K: 前128KB的SHA1,115网盘使用
+ if len(sha1_128kHash) != utils.SHA1_128K.Width {
+ const PreHashSize int64 = 128 * 1024 // 128KB
+ hashSize := PreHashSize
+ if s.GetSize() < PreHashSize {
+ hashSize = s.GetSize()
+ }
+ reader, err := s.RangeRead(http_range.Range{Start: 0, Length: hashSize})
+ if err == nil {
+ sha1_128kHash, err = utils.HashReader(utils.SHA1, reader)
+ if closer, ok := reader.(io.Closer); ok {
+ _ = closer.Close()
+ }
+ if err != nil {
+ log.Warnf("[openlist] failed to pre-calculate SHA1_128K: %v", err)
+ sha1_128kHash = ""
+ }
+ } else {
+ log.Warnf("[openlist] failed to RangeRead for SHA1_128K: %v", err)
+ }
+ }
+
+ // PRE_HASH: 前1024字节的SHA1,阿里云盘使用
+ if len(preHash) != utils.PRE_HASH.Width {
+ const PreHashSize int64 = 1024 // 1KB
+ hashSize := PreHashSize
+ if s.GetSize() < PreHashSize {
+ hashSize = s.GetSize()
+ }
+ reader, err := s.RangeRead(http_range.Range{Start: 0, Length: hashSize})
+ if err == nil {
+ preHash, err = utils.HashReader(utils.SHA1, reader)
+ if closer, ok := reader.(io.Closer); ok {
+ _ = closer.Close()
+ }
+ if err != nil {
+ log.Warnf("[openlist] failed to pre-calculate PRE_HASH: %v", err)
+ preHash = ""
+ }
+ } else {
+ log.Warnf("[openlist] failed to RangeRead for PRE_HASH: %v", err)
+ }
+ }
+
+ // 诊断日志:检查流的状态
+ if ss, ok := s.(*stream.SeekableStream); ok {
+ if ss.Reader != nil {
+ log.Warnf("[openlist] WARNING: SeekableStream.Reader is not nil for file %s, stream may have been consumed!", s.GetName())
+ }
+ }
+
reader := driver.NewLimitedUploadStream(ctx, &driver.ReaderUpdatingProgress{
Reader: s,
UpdateProgress: up,
@@ -206,14 +294,20 @@ func (d *OpenList) Put(ctx context.Context, dstDir model.Obj, s model.FileStream
req.Header.Set("Authorization", d.Token)
req.Header.Set("File-Path", path.Join(dstDir.GetPath(), s.GetName()))
req.Header.Set("Password", d.MetaPassword)
- if md5 := s.GetHash().GetHash(utils.MD5); len(md5) > 0 {
- req.Header.Set("X-File-Md5", md5)
+ if len(md5Hash) > 0 {
+ req.Header.Set("X-File-Md5", md5Hash)
+ }
+ if len(sha1Hash) > 0 {
+ req.Header.Set("X-File-Sha1", sha1Hash)
+ }
+ if len(sha256Hash) > 0 {
+ req.Header.Set("X-File-Sha256", sha256Hash)
}
- if sha1 := s.GetHash().GetHash(utils.SHA1); len(sha1) > 0 {
- req.Header.Set("X-File-Sha1", sha1)
+ if len(sha1_128kHash) > 0 {
+ req.Header.Set("X-File-Sha1-128k", sha1_128kHash)
}
- if sha256 := s.GetHash().GetHash(utils.SHA256); len(sha256) > 0 {
- req.Header.Set("X-File-Sha256", sha256)
+ if len(preHash) > 0 {
+ req.Header.Set("X-File-Pre-Hash", preHash)
}
req.ContentLength = s.GetSize()
diff --git a/drivers/quark_open/driver.go b/drivers/quark_open/driver.go
index f0b8baf09..26a4288cc 100644
--- a/drivers/quark_open/driver.go
+++ b/drivers/quark_open/driver.go
@@ -8,6 +8,7 @@ import (
"hash"
"io"
"net/http"
+ "strings"
"time"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
@@ -18,15 +19,23 @@ import (
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
+ log "github.com/sirupsen/logrus"
+ "golang.org/x/time/rate"
)
type QuarkOpen struct {
model.Storage
Addition
- config driver.Config
- conf Conf
+ config driver.Config
+ conf Conf
+ limiter *rate.Limiter
}
+// 速率限制常量:夸克开放平台限流,保守设置
+const (
+ quarkRateLimit = 2.0 // 每秒2个请求,避免限流
+)
+
func (d *QuarkOpen) Config() driver.Config {
return d.config
}
@@ -36,6 +45,9 @@ func (d *QuarkOpen) GetAddition() driver.Additional {
}
func (d *QuarkOpen) Init(ctx context.Context) error {
+ // 初始化速率限制器
+ d.limiter = rate.NewLimiter(rate.Limit(quarkRateLimit), 1)
+
var resp UserInfoResp
_, err := d.request(ctx, "/open/v1/user/info", http.MethodGet, nil, &resp)
@@ -52,11 +64,22 @@ func (d *QuarkOpen) Init(ctx context.Context) error {
return err
}
+// waitLimit 等待速率限制
+func (d *QuarkOpen) waitLimit(ctx context.Context) error {
+ if d.limiter != nil {
+ return d.limiter.Wait(ctx)
+ }
+ return nil
+}
+
func (d *QuarkOpen) Drop(ctx context.Context) error {
return nil
}
func (d *QuarkOpen) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
+ if err := d.waitLimit(ctx); err != nil {
+ return nil, err
+ }
files, err := d.GetFiles(ctx, dir.GetID())
if err != nil {
return nil, err
@@ -67,6 +90,9 @@ func (d *QuarkOpen) List(ctx context.Context, dir model.Obj, args model.ListArgs
}
func (d *QuarkOpen) Link(ctx context.Context, file model.Obj, args model.LinkArgs) (*model.Link, error) {
+ if err := d.waitLimit(ctx); err != nil {
+ return nil, err
+ }
data := base.Json{
"fid": file.GetID(),
}
@@ -143,35 +169,116 @@ func (d *QuarkOpen) Remove(ctx context.Context, obj model.Obj) error {
}
func (d *QuarkOpen) Put(ctx context.Context, dstDir model.Obj, stream model.FileStreamer, up driver.UpdateProgress) error {
- md5Str, sha1Str := stream.GetHash().GetHash(utils.MD5), stream.GetHash().GetHash(utils.SHA1)
- var (
- md5 hash.Hash
- sha1 hash.Hash
- )
- writers := []io.Writer{}
- if len(md5Str) != utils.MD5.Width {
- md5 = utils.MD5.NewFunc()
- writers = append(writers, md5)
- }
- if len(sha1Str) != utils.SHA1.Width {
- sha1 = utils.SHA1.NewFunc()
- writers = append(writers, sha1)
+ if err := d.waitLimit(ctx); err != nil {
+ return err
}
+ md5Str, sha1Str := stream.GetHash().GetHash(utils.MD5), stream.GetHash().GetHash(utils.SHA1)
- if len(writers) > 0 {
- _, err := stream.CacheFullAndWriter(&up, io.MultiWriter(writers...))
- if err != nil {
- return err
- }
- if md5 != nil {
- md5Str = hex.EncodeToString(md5.Sum(nil))
- }
- if sha1 != nil {
- sha1Str = hex.EncodeToString(sha1.Sum(nil))
+ // 检查是否需要计算hash
+ needMD5 := len(md5Str) != utils.MD5.Width
+ needSHA1 := len(sha1Str) != utils.SHA1.Width
+
+ if needMD5 || needSHA1 {
+ // 检查是否为可重复读取的流
+ _, isSeekable := stream.(*streamPkg.SeekableStream)
+
+ if isSeekable {
+ // 可重复读取的流,使用 RangeRead 一次性计算所有hash,避免重复读取
+ var md5 hash.Hash
+ var sha1 hash.Hash
+ writers := []io.Writer{}
+
+ if needMD5 {
+ md5 = utils.MD5.NewFunc()
+ writers = append(writers, md5)
+ }
+ if needSHA1 {
+ sha1 = utils.SHA1.NewFunc()
+ writers = append(writers, sha1)
+ }
+
+ // 使用 RangeRead 分块读取文件,同时计算多个hash
+ multiWriter := io.MultiWriter(writers...)
+ size := stream.GetSize()
+ chunkSize := int64(10 * utils.MB) // 10MB per chunk
+ buf := make([]byte, chunkSize)
+ var offset int64 = 0
+
+ for offset < size {
+ readSize := min(chunkSize, size-offset)
+
+ n, err := streamPkg.ReadFullWithRangeRead(stream, buf[:readSize], offset)
+ if err != nil {
+ return fmt.Errorf("calculate hash failed at offset %d: %w", offset, err)
+ }
+
+ multiWriter.Write(buf[:n])
+ offset += int64(n)
+
+ // 更新进度(hash计算占用40%的进度)
+ up(40 * float64(offset) / float64(size))
+ }
+
+ if md5 != nil {
+ md5Str = hex.EncodeToString(md5.Sum(nil))
+ }
+ if sha1 != nil {
+ sha1Str = hex.EncodeToString(sha1.Sum(nil))
+ }
+ } else {
+ // 不可重复读取的流(如网络流),需要缓存并计算hash
+ var md5 hash.Hash
+ var sha1 hash.Hash
+ writers := []io.Writer{}
+
+ if needMD5 {
+ md5 = utils.MD5.NewFunc()
+ writers = append(writers, md5)
+ }
+ if needSHA1 {
+ sha1 = utils.SHA1.NewFunc()
+ writers = append(writers, sha1)
+ }
+
+ _, err := stream.CacheFullAndWriter(&up, io.MultiWriter(writers...))
+ if err != nil {
+ return err
+ }
+
+ if md5 != nil {
+ md5Str = hex.EncodeToString(md5.Sum(nil))
+ }
+ if sha1 != nil {
+ sha1Str = hex.EncodeToString(sha1.Sum(nil))
+ }
}
}
- // pre
- pre, err := d.upPre(ctx, stream, dstDir.GetID(), md5Str, sha1Str)
+ // pre - 带有 proof fail 重试逻辑
+ var pre UpPreResp
+ var err error
+ err = retry.Do(func() error {
+ var preErr error
+ pre, preErr = d.upPre(ctx, stream, dstDir.GetID(), md5Str, sha1Str)
+ if preErr != nil {
+ // 检查是否为 proof fail 错误
+ if strings.Contains(preErr.Error(), "proof") || strings.Contains(preErr.Error(), "43010") {
+ log.Warnf("[quark_open] Proof verification failed, retrying: %v", preErr)
+ return preErr // 返回错误触发重试
+ }
+ // 检查是否为限流错误
+ if strings.Contains(preErr.Error(), "限流") || strings.Contains(preErr.Error(), "rate") {
+ log.Warnf("[quark_open] Rate limited, waiting before retry: %v", preErr)
+ time.Sleep(2 * time.Second) // 额外等待
+ return preErr
+ }
+ }
+ return preErr
+ },
+ retry.Context(ctx),
+ retry.Attempts(3),
+ retry.DelayType(retry.BackOffDelay),
+ retry.Delay(500*time.Millisecond),
+ )
if err != nil {
return err
}
@@ -181,16 +288,70 @@ func (d *QuarkOpen) Put(ctx context.Context, dstDir model.Obj, stream model.File
return nil
}
- // get part info
- partInfo := d._getPartInfo(stream, pre.Data.PartSize)
- // get upload url info
- upUrlInfo, err := d.upUrl(ctx, pre, partInfo)
- if err != nil {
+ // 空文件特殊处理:跳过分片上传,直接调用 upFinish
+ // 由于夸克 API 对空文件处理不稳定,尝试完成上传,失败则直接成功返回
+ if stream.GetSize() == 0 {
+ log.Infof("[quark_open] Empty file detected, attempting direct finish (task_id: %s)", pre.Data.TaskID)
+ err = d.upFinish(ctx, pre, []base.Json{}, []string{})
+ if err != nil {
+ // 空文件 upFinish 失败,可能是 API 不支持,直接视为成功
+ log.Warnf("[quark_open] Empty file upFinish failed: %v, treating as success", err)
+ }
+ up(100)
+ return nil
+ }
+
+ // 带重试的分片大小调整逻辑:如果检测到 "part list exceed" 错误,自动翻倍分片大小
+ var upUrlInfo UpUrlInfo
+ var partInfo []base.Json
+ currentPartSize := pre.Data.PartSize
+ const maxRetries = 5
+ const maxPartSize = 1024 * utils.MB // 1GB 上限
+
+ for attempt := 0; attempt < maxRetries; attempt++ {
+ // 计算分片信息
+ partInfo = d._getPartInfo(stream, currentPartSize)
+
+ // 尝试获取上传 URL
+ upUrlInfo, err = d.upUrl(ctx, pre, partInfo)
+ if err == nil {
+ // 成功获取上传 URL
+ log.Infof("[quark_open] Successfully obtained upload URLs with part size: %d MB (%d parts)",
+ currentPartSize/(1024*1024), len(partInfo))
+ break
+ }
+
+ // 检查是否为分片超限错误
+ if strings.Contains(err.Error(), "exceed") {
+ if attempt < maxRetries-1 {
+ // 还有重试机会,翻倍分片大小
+ newPartSize := currentPartSize * 2
+
+ // 检查是否超过上限
+ if newPartSize > maxPartSize {
+ return fmt.Errorf("part list exceeded and cannot increase part size (current: %d MB, max: %d MB). File may be too large for Quark API",
+ currentPartSize/(1024*1024), maxPartSize/(1024*1024))
+ }
+
+ log.Warnf("[quark_open] Part list exceeded (attempt %d/%d, %d parts). Retrying with doubled part size: %d MB -> %d MB",
+ attempt+1, maxRetries, len(partInfo),
+ currentPartSize/(1024*1024), newPartSize/(1024*1024))
+
+ currentPartSize = newPartSize
+ continue // 重试
+ } else {
+ // 已达到最大重试次数
+ return fmt.Errorf("part list exceeded after %d retries. Last attempt: part size %d MB, %d parts",
+ maxRetries, currentPartSize/(1024*1024), len(partInfo))
+ }
+ }
+
+ // 其他错误,直接返回
return err
}
- // part up
- ss, err := streamPkg.NewStreamSectionReader(stream, int(pre.Data.PartSize), &up)
+ // part up - 使用调整后的 currentPartSize
+ ss, err := streamPkg.NewStreamSectionReader(stream, int(currentPartSize), &up)
if err != nil {
return err
}
@@ -204,30 +365,49 @@ func (d *QuarkOpen) Put(ctx context.Context, dstDir model.Obj, stream model.File
return ctx.Err()
}
- offset := int64(i) * pre.Data.PartSize
- size := min(pre.Data.PartSize, total-offset)
+ offset := int64(i) * currentPartSize
+ size := min(currentPartSize, total-offset)
rd, err := ss.GetSectionReader(offset, size)
if err != nil {
return err
}
+
+ // 上传重试逻辑,包含URL刷新
+ var etag string
err = retry.Do(func() error {
rd.Seek(0, io.SeekStart)
- etag, err := d.upPart(ctx, upUrlInfo, i, driver.NewLimitedUploadStream(ctx, rd))
- if err != nil {
- return err
+ var uploadErr error
+ etag, uploadErr = d.upPart(ctx, upUrlInfo, i, driver.NewLimitedUploadStream(ctx, rd))
+
+ // 检查是否为URL过期错误
+ if uploadErr != nil && strings.Contains(uploadErr.Error(), "expire") {
+ log.Warnf("[quark_open] Upload URL expired for part %d, refreshing...", i)
+ // 刷新上传URL
+ newUpUrlInfo, refreshErr := d.upUrl(ctx, pre, partInfo)
+ if refreshErr != nil {
+ return fmt.Errorf("failed to refresh upload url: %w", refreshErr)
+ }
+ upUrlInfo = newUpUrlInfo
+ log.Infof("[quark_open] Upload URL refreshed successfully")
+
+ // 使用新URL重试上传
+ rd.Seek(0, io.SeekStart)
+ etag, uploadErr = d.upPart(ctx, upUrlInfo, i, driver.NewLimitedUploadStream(ctx, rd))
}
- etags = append(etags, etag)
- return nil
+
+ return uploadErr
},
retry.Context(ctx),
retry.Attempts(3),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second))
+
ss.FreeSectionReader(rd)
if err != nil {
return fmt.Errorf("failed to upload part %d: %w", i, err)
}
+ etags = append(etags, etag)
up(95 * float64(offset+size) / float64(total))
}
diff --git a/drivers/quark_open/meta.go b/drivers/quark_open/meta.go
index 3527b52e9..ee1903939 100644
--- a/drivers/quark_open/meta.go
+++ b/drivers/quark_open/meta.go
@@ -13,8 +13,8 @@ type Addition struct {
APIAddress string `json:"api_url_address" default:"https://api.oplist.org/quarkyun/renewapi"`
AccessToken string `json:"access_token" required:"false" default:""`
RefreshToken string `json:"refresh_token" required:"true"`
- AppID string `json:"app_id" required:"true" help:"Keep it empty if you don't have one"`
- SignKey string `json:"sign_key" required:"true" help:"Keep it empty if you don't have one"`
+ AppID string `json:"app_id" required:"false" default:"" help:"Optional - Auto-filled from online API, or use your own"`
+ SignKey string `json:"sign_key" required:"false" default:"" help:"Optional - Auto-filled from online API, or use your own"`
}
type Conf struct {
diff --git a/drivers/quark_open/util.go b/drivers/quark_open/util.go
index 788ca0e99..1a3058375 100644
--- a/drivers/quark_open/util.go
+++ b/drivers/quark_open/util.go
@@ -20,6 +20,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/drivers/base"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
+ "github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/go-resty/resty/v2"
log "github.com/sirupsen/logrus"
)
@@ -283,8 +284,15 @@ func (d *QuarkOpen) getProofRange(proofSeed string, fileSize int64) (*ProofRange
func (d *QuarkOpen) _getPartInfo(stream model.FileStreamer, partSize int64) []base.Json {
// 计算分片信息
- partInfo := make([]base.Json, 0)
total := stream.GetSize()
+
+ // 确保partSize合理:最小4MB,避免分片过多
+ const minPartSize int64 = 4 * utils.MB
+ if partSize < minPartSize {
+ partSize = minPartSize
+ }
+
+ partInfo := make([]base.Json, 0)
left := total
partNumber := 1
@@ -304,6 +312,7 @@ func (d *QuarkOpen) _getPartInfo(stream model.FileStreamer, partSize int64) []ba
partNumber++
}
+ log.Infof("[quark_open] Upload plan: file_size=%d, part_size=%d, part_count=%d", total, partSize, len(partInfo))
return partInfo
}
@@ -315,11 +324,17 @@ func (d *QuarkOpen) upUrl(ctx context.Context, pre UpPreResp, partInfo []base.Js
}
var resp UpUrlResp
+ log.Infof("[quark_open] Requesting upload URLs for %d parts (task_id: %s)", len(partInfo), pre.Data.TaskID)
+
_, err = d.request(ctx, "/open/v1/file/get_upload_urls", http.MethodPost, func(req *resty.Request) {
req.SetBody(data)
}, &resp)
if err != nil {
+ // 如果是分片超限错误,记录详细信息
+ if strings.Contains(err.Error(), "part list exceed") {
+ log.Errorf("[quark_open] Part list exceeded limit! Requested %d parts. Please check Quark API documentation for actual limit.", len(partInfo))
+ }
return upUrlInfo, err
}
@@ -340,13 +355,43 @@ func (d *QuarkOpen) upPart(ctx context.Context, upUrlInfo UpUrlInfo, partNumber
req.Header.Set("Accept-Encoding", "gzip")
req.Header.Set("User-Agent", "Go-http-client/1.1")
+ // ✅ 关键修复:使用更长的超时时间(10分钟)
+ // 慢速网络下大文件分片上传可能需要很长时间
+ client := &http.Client{
+ Timeout: 10 * time.Minute,
+ Transport: base.HttpClient.Transport,
+ }
+
// 发送请求
- resp, err := base.HttpClient.Do(req)
+ resp, err := client.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
+ // 检查是否为URL过期错误(403, 410等状态码)
+ if resp.StatusCode == 403 || resp.StatusCode == 410 {
+ body, _ := io.ReadAll(resp.Body)
+ return "", fmt.Errorf("upload url expired (status: %d): %s", resp.StatusCode, string(body))
+ }
+
+ // ✅ 关键修复:409 PartAlreadyExist 不是错误!
+ // 夸克使用Sequential模式,超时重试时如果分片已存在,说明第一次其实成功了
+ if resp.StatusCode == 409 {
+ body, _ := io.ReadAll(resp.Body)
+ // 从响应体中提取已存在分片的ETag
+ if strings.Contains(string(body), "PartAlreadyExist") {
+ // 尝试从XML响应中提取ETag
+ if etag := extractEtagFromXML(string(body)); etag != "" {
+ log.Infof("[quark_open] Part %d already exists (409), using existing ETag: %s", partNumber+1, etag)
+ return etag, nil
+ }
+ // 如果无法提取ETag,返回错误
+ log.Warnf("[quark_open] Part %d already exists but cannot extract ETag from response: %s", partNumber+1, string(body))
+ return "", fmt.Errorf("part already exists but ETag not found in response")
+ }
+ }
+
if resp.StatusCode != 200 {
body, _ := io.ReadAll(resp.Body)
return "", fmt.Errorf("up status: %d, error: %s", resp.StatusCode, string(body))
@@ -355,6 +400,23 @@ func (d *QuarkOpen) upPart(ctx context.Context, upUrlInfo UpUrlInfo, partNumber
return resp.Header.Get("Etag"), nil
}
// extractEtagFromXML extracts the ETag value from an OSS XML error
// response, e.g. <ETag>"2F796AC486BB2891E3237D8BFDE020B5"</ETag>.
// It returns the ETag with any surrounding double quotes removed, or ""
// when no <ETag> element is present in the body.
func extractEtagFromXML(xmlBody string) string {
	const openTag, closeTag = "<ETag>", "</ETag>"
	start := strings.Index(xmlBody, openTag)
	if start == -1 {
		return ""
	}
	start += len(openTag)
	end := strings.Index(xmlBody[start:], closeTag)
	if end == -1 {
		return ""
	}
	// OSS wraps the ETag value in double quotes; strip them so the
	// result matches ETags taken from response headers.
	return strings.Trim(xmlBody[start:start+end], "\"")
}
+
func (d *QuarkOpen) upFinish(ctx context.Context, pre UpPreResp, partInfo []base.Json, etags []string) error {
// 创建 part_info_list
partInfoList := make([]base.Json, len(partInfo))
@@ -417,25 +479,36 @@ func (d *QuarkOpen) generateReqSign(method string, pathname string, signKey stri
}
func (d *QuarkOpen) refreshToken() error {
- refresh, access, err := d._refreshToken()
+ refresh, access, appID, signKey, err := d._refreshToken()
for i := 0; i < 3; i++ {
if err == nil {
break
} else {
log.Errorf("[quark_open] failed to refresh token: %s", err)
}
- refresh, access, err = d._refreshToken()
+ refresh, access, appID, signKey, err = d._refreshToken()
}
if err != nil {
return err
}
log.Infof("[quark_open] token exchange: %s -> %s", d.RefreshToken, refresh)
d.RefreshToken, d.AccessToken = refresh, access
+
+ // 如果在线API返回了AppID和SignKey,保存它们(不为空时才更新)
+ if appID != "" && appID != d.AppID {
+ d.AppID = appID
+ log.Infof("[quark_open] AppID updated from online API: %s", appID)
+ }
+ if signKey != "" && signKey != d.SignKey {
+ d.SignKey = signKey
+ log.Infof("[quark_open] SignKey updated from online API")
+ }
+
op.MustSaveDriverStorage(d)
return nil
}
-func (d *QuarkOpen) _refreshToken() (string, string, error) {
+func (d *QuarkOpen) _refreshToken() (string, string, string, string, error) {
if d.UseOnlineAPI && d.APIAddress != "" {
u := d.APIAddress
var resp RefreshTokenOnlineAPIResp
@@ -448,19 +521,20 @@ func (d *QuarkOpen) _refreshToken() (string, string, error) {
}).
Get(u)
if err != nil {
- return "", "", err
+ return "", "", "", "", err
}
if resp.RefreshToken == "" || resp.AccessToken == "" {
if resp.ErrorMessage != "" {
- return "", "", fmt.Errorf("failed to refresh token: %s", resp.ErrorMessage)
+ return "", "", "", "", fmt.Errorf("failed to refresh token: %s", resp.ErrorMessage)
}
- return "", "", fmt.Errorf("empty token returned from official API, a wrong refresh token may have been used")
+ return "", "", "", "", fmt.Errorf("empty token returned from official API, a wrong refresh token may have been used")
}
- return resp.RefreshToken, resp.AccessToken, nil
+ // 返回所有字段,包括AppID和SignKey
+ return resp.RefreshToken, resp.AccessToken, resp.AppID, resp.SignKey, nil
}
// TODO 本地刷新逻辑
- return "", "", fmt.Errorf("local refresh token logic is not implemented yet, please use online API or contact the developer")
+ return "", "", "", "", fmt.Errorf("local refresh token logic is not implemented yet, please use online API or contact the developer")
}
// 生成认证 Cookie
diff --git a/drivers/quark_uc/util.go b/drivers/quark_uc/util.go
index 48aabc48f..87798c6ef 100644
--- a/drivers/quark_uc/util.go
+++ b/drivers/quark_uc/util.go
@@ -6,6 +6,7 @@ import (
"encoding/base64"
"errors"
"fmt"
+ "html"
"io"
"net/http"
"strconv"
@@ -70,10 +71,10 @@ func (d *QuarkOrUC) GetFiles(parent string) ([]model.Obj, error) {
page := 1
size := 100
query := map[string]string{
- "pdir_fid": parent,
- "_size": strconv.Itoa(size),
- "_fetch_total": "1",
- "fetch_all_file": "1",
+ "pdir_fid": parent,
+ "_size": strconv.Itoa(size),
+ "_fetch_total": "1",
+ "fetch_all_file": "1",
"fetch_risk_file_name": "1",
}
if d.OrderBy != "none" {
@@ -89,6 +90,7 @@ func (d *QuarkOrUC) GetFiles(parent string) ([]model.Obj, error) {
return nil, err
}
for _, file := range resp.Data.List {
+ file.FileName = html.UnescapeString(file.FileName)
if d.OnlyListVideoFile {
// 开启后 只列出视频文件和文件夹
if file.IsDir() || file.Category == 1 {
diff --git a/drivers/quark_uc_tv/util.go b/drivers/quark_uc_tv/util.go
index d68a2f3c3..c0da6eb6b 100644
--- a/drivers/quark_uc_tv/util.go
+++ b/drivers/quark_uc_tv/util.go
@@ -8,6 +8,7 @@ import (
"errors"
"net/http"
"strconv"
+ "strings"
"time"
"github.com/OpenListTeam/OpenList/v4/internal/model"
@@ -70,8 +71,16 @@ func (d *QuarkUCTV) request(ctx context.Context, pathname string, method string,
return nil, err
}
// 判断 是否需要 刷新 access_token
- if e.Status == -1 && e.Errno == 10001 {
- // token 过期
+ errInfoLower := strings.ToLower(strings.TrimSpace(e.ErrorInfo))
+ maybeTokenInvalid :=
+ (e.Status == -1 && (e.Errno == 10001 || e.Errno == 11001)) ||
+ (errInfoLower != "" &&
+ (strings.Contains(errInfoLower, "access token") ||
+ strings.Contains(errInfoLower, "access_token") ||
+ strings.Contains(errInfoLower, "token无效") ||
+ strings.Contains(errInfoLower, "token 无效")))
+ if maybeTokenInvalid {
+ // token 过期 / 无效
err = d.getRefreshTokenByTV(ctx, d.Addition.RefreshToken, true)
if err != nil {
return nil, err
diff --git a/drivers/sftp/types.go b/drivers/sftp/types.go
index 00a32f001..a57076e08 100644
--- a/drivers/sftp/types.go
+++ b/drivers/sftp/types.go
@@ -48,8 +48,8 @@ func (d *SFTP) fileToObj(f os.FileInfo, dir string) (model.Obj, error) {
Size: _f.Size(),
Modified: _f.ModTime(),
IsFolder: _f.IsDir(),
- Path: target,
+ Path: path, // Use symlink's own path, not target path
}
- log.Debugf("[sftp] obj: %+v, is symlink: %v", obj, symlink)
+ log.Debugf("[sftp] obj: %+v, is symlink: %v, target: %s", obj, symlink, target)
return obj, nil
}
diff --git a/drivers/teldrive/driver.go b/drivers/teldrive/driver.go
index 11ba0971e..d420eb4d0 100644
--- a/drivers/teldrive/driver.go
+++ b/drivers/teldrive/driver.go
@@ -7,6 +7,7 @@ import (
"net/http"
"net/url"
"path"
+ "strconv"
"strings"
"github.com/OpenListTeam/OpenList/v4/drivers/base"
@@ -17,6 +18,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/go-resty/resty/v2"
"github.com/google/uuid"
+ "golang.org/x/sync/errgroup"
)
type Teldrive struct {
@@ -53,18 +55,58 @@ func (d *Teldrive) Drop(ctx context.Context) error {
}
func (d *Teldrive) List(ctx context.Context, dir model.Obj, args model.ListArgs) ([]model.Obj, error) {
- var listResp ListResp
+ var firstResp ListResp
err := d.request(http.MethodGet, "/api/files", func(req *resty.Request) {
req.SetQueryParams(map[string]string{
"path": dir.GetPath(),
- "limit": "1000", // overide default 500, TODO pagination
+ "limit": "500",
+ "page": "1",
})
- }, &listResp)
+ }, &firstResp)
+
if err != nil {
return nil, err
}
- return utils.SliceConvert(listResp.Items, func(src Object) (model.Obj, error) {
+ pagesData := make([][]Object, firstResp.Meta.TotalPages)
+ pagesData[0] = firstResp.Items
+
+ if firstResp.Meta.TotalPages > 1 {
+ g, _ := errgroup.WithContext(ctx)
+ g.SetLimit(8)
+
+ for i := 2; i <= firstResp.Meta.TotalPages; i++ {
+ page := i
+ g.Go(func() error {
+ var resp ListResp
+ err := d.request(http.MethodGet, "/api/files", func(req *resty.Request) {
+ req.SetQueryParams(map[string]string{
+ "path": dir.GetPath(),
+ "limit": "500",
+ "page": strconv.Itoa(page),
+ })
+ }, &resp)
+
+ if err != nil {
+ return err
+ }
+
+ pagesData[page-1] = resp.Items
+ return nil
+ })
+ }
+
+ if err := g.Wait(); err != nil {
+ return nil, err
+ }
+ }
+
+ var allItems []Object
+ for _, items := range pagesData {
+ allItems = append(allItems, items...)
+ }
+
+ return utils.SliceConvert(allItems, func(src Object) (model.Obj, error) {
return &model.Object{
Path: path.Join(dir.GetPath(), src.Name),
ID: src.ID,
@@ -184,7 +226,7 @@ func (d *Teldrive) Put(ctx context.Context, dstDir model.Obj, file model.FileStr
}
if totalParts <= 1 {
- return d.doSingleUpload(ctx, dstDir, file, up, totalParts, chunkSize, fileId)
+ return d.doSingleUpload(ctx, dstDir, file, up, maxRetried, totalParts, chunkSize, fileId)
}
return d.doMultiUpload(ctx, dstDir, file, up, maxRetried, totalParts, chunkSize, fileId)
diff --git a/drivers/teldrive/meta.go b/drivers/teldrive/meta.go
index 23bae5f94..cc7a5dbf7 100644
--- a/drivers/teldrive/meta.go
+++ b/drivers/teldrive/meta.go
@@ -11,6 +11,7 @@ type Addition struct {
Cookie string `json:"cookie" type:"string" required:"true" help:"access_token=xxx"`
UseShareLink bool `json:"use_share_link" type:"bool" default:"false" help:"Create share link when getting link to support 302. If disabled, you need to enable web proxy."`
ChunkSize int64 `json:"chunk_size" type:"number" default:"10" help:"Chunk size in MiB"`
+ RandomChunkName bool `json:"random_chunk_name" type:"bool" default:"true" help:"Random chunk name"`
UploadConcurrency int64 `json:"upload_concurrency" type:"number" default:"4" help:"Concurrency upload requests"`
}
diff --git a/drivers/teldrive/upload.go b/drivers/teldrive/upload.go
index 87cffa1ae..b94f5fc93 100644
--- a/drivers/teldrive/upload.go
+++ b/drivers/teldrive/upload.go
@@ -1,6 +1,8 @@
package teldrive
import (
+ "crypto/md5"
+ "encoding/hex"
"fmt"
"io"
"net/http"
@@ -16,6 +18,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/avast/retry-go"
"github.com/go-resty/resty/v2"
+ "github.com/google/uuid"
"github.com/pkg/errors"
"golang.org/x/net/context"
"golang.org/x/sync/errgroup"
@@ -38,6 +41,11 @@ func (d *Teldrive) touch(name, path string) error {
return nil
}
// getMD5Hash returns the lowercase hexadecimal MD5 digest of text.
func getMD5Hash(text string) string {
	digest := md5.Sum([]byte(text))
	return hex.EncodeToString(digest[:])
}
+
func (d *Teldrive) createFileOnUploadSuccess(name, id, path string, uploadedFileParts []FilePart, totalSize int64) error {
remoteFileParts, err := d.getFilePart(id)
if err != nil {
@@ -101,12 +109,10 @@ func (d *Teldrive) getFilePart(fileId string) ([]FilePart, error) {
return uploadedParts, nil
}
-func (d *Teldrive) singleUploadRequest(fileId string, callback base.ReqCallback, resp interface{}) error {
+func (d *Teldrive) singleUploadRequest(ctx context.Context, fileId string, callback base.ReqCallback, resp any) error {
url := d.Address + "/api/uploads/" + fileId
client := resty.New().SetTimeout(0)
- ctx := context.Background()
-
req := client.R().
SetContext(ctx)
req.SetHeader("Cookie", d.Cookie)
@@ -135,16 +141,18 @@ func (d *Teldrive) singleUploadRequest(fileId string, callback base.ReqCallback,
}
func (d *Teldrive) doSingleUpload(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up model.UpdateProgress,
- totalParts int, chunkSize int64, fileId string) error {
+ maxRetried, totalParts int, chunkSize int64, fileId string) error {
totalSize := file.GetSize()
var fileParts []FilePart
var uploaded int64 = 0
- ss, err := stream.NewStreamSectionReader(file, int(totalSize), &up)
+ var partName string
+ chunkSize = min(totalSize, chunkSize)
+ ss, err := stream.NewStreamSectionReader(file, int(chunkSize), &up)
if err != nil {
return err
}
-
+ chunkCnt := 0
for uploaded < totalSize {
if utils.IsCanceled(ctx) {
return ctx.Err()
@@ -154,6 +162,7 @@ func (d *Teldrive) doSingleUpload(ctx context.Context, dstDir model.Obj, file mo
if err != nil {
return err
}
+ chunkCnt += 1
filePart := &FilePart{}
if err := retry.Do(func() error {
@@ -161,13 +170,19 @@ func (d *Teldrive) doSingleUpload(ctx context.Context, dstDir model.Obj, file mo
return err
}
- if err := d.singleUploadRequest(fileId, func(req *resty.Request) {
+ if d.RandomChunkName {
+ partName = getMD5Hash(uuid.New().String())
+ } else {
+ partName = file.GetName()
+ if totalParts > 1 {
+ partName = fmt.Sprintf("%s.part.%03d", file.GetName(), chunkCnt)
+ }
+ }
+
+ if err := d.singleUploadRequest(ctx, fileId, func(req *resty.Request) {
uploadParams := map[string]string{
- "partName": func() string {
- digits := len(strconv.Itoa(totalParts))
- return file.GetName() + fmt.Sprintf(".%0*d", digits, 1)
- }(),
- "partNo": strconv.Itoa(1),
+ "partName": partName,
+ "partNo": strconv.Itoa(chunkCnt),
"fileName": file.GetName(),
}
req.SetQueryParams(uploadParams)
@@ -180,7 +195,7 @@ func (d *Teldrive) doSingleUpload(ctx context.Context, dstDir model.Obj, file mo
return nil
},
retry.Context(ctx),
- retry.Attempts(3),
+ retry.Attempts(uint(maxRetried)),
retry.DelayType(retry.BackOffDelay),
retry.Delay(time.Second)); err != nil {
return err
@@ -189,8 +204,11 @@ func (d *Teldrive) doSingleUpload(ctx context.Context, dstDir model.Obj, file mo
if filePart.Name != "" {
fileParts = append(fileParts, *filePart)
uploaded += curChunkSize
- up(float64(uploaded) / float64(totalSize))
+ up(float64(uploaded) / float64(totalSize) * 100)
ss.FreeSectionReader(rd)
+ } else {
+ // For common situation this code won't reach
+ return fmt.Errorf("[Teldrive] upload chunk %d failed: filePart Somehow missing", chunkCnt)
}
}
@@ -318,6 +336,7 @@ func (d *Teldrive) doMultiUpload(ctx context.Context, dstDir model.Obj, file mod
func (d *Teldrive) uploadSingleChunk(ctx context.Context, fileId string, task chunkTask, totalParts, maxRetried int) (*FilePart, error) {
filePart := &FilePart{}
retryCount := 0
+ var partName string
defer task.ss.FreeSectionReader(task.reader)
for {
@@ -331,12 +350,22 @@ func (d *Teldrive) uploadSingleChunk(ctx context.Context, fileId string, task ch
return &existingPart, nil
}
- err := d.singleUploadRequest(fileId, func(req *resty.Request) {
+ if _, err := task.reader.Seek(0, io.SeekStart); err != nil {
+ return nil, err
+ }
+
+ if d.RandomChunkName {
+ partName = getMD5Hash(uuid.New().String())
+ } else {
+ partName = task.fileName
+ if totalParts > 1 {
+ partName = fmt.Sprintf("%s.part.%03d", task.fileName, task.chunkIdx)
+ }
+ }
+
+ err := d.singleUploadRequest(ctx, fileId, func(req *resty.Request) {
uploadParams := map[string]string{
- "partName": func() string {
- digits := len(strconv.Itoa(totalParts))
- return task.fileName + fmt.Sprintf(".%0*d", digits, task.chunkIdx)
- }(),
+ "partName": partName,
"partNo": strconv.Itoa(task.chunkIdx),
"fileName": task.fileName,
}
diff --git a/go.mod b/go.mod
index e751bcfe0..c36ac1ca0 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,8 @@
module github.com/OpenListTeam/OpenList/v4
-go 1.23.4
+go 1.24.0
+
+toolchain go1.24.13
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.18.1
@@ -14,7 +16,7 @@ require (
github.com/OpenListTeam/wopan-sdk-go v0.1.5
github.com/ProtonMail/go-crypto v1.3.0
github.com/ProtonMail/gopenpgp/v2 v2.9.0
- github.com/SheltonZhu/115driver v1.1.1
+ github.com/SheltonZhu/115driver v1.2.3
github.com/aliyun/aliyun-oss-go-sdk v3.0.2+incompatible
github.com/antchfx/htmlquery v1.3.5
github.com/antchfx/xpath v1.3.5
@@ -73,11 +75,11 @@ require (
github.com/upyun/go-sdk/v3 v3.0.4
github.com/winfsp/cgofuse v1.6.0
github.com/zzzhr1990/go-common-entity v0.0.0-20250202070650-1a200048f0d3
- golang.org/x/crypto v0.40.0
+ golang.org/x/crypto v0.46.0
golang.org/x/image v0.29.0
- golang.org/x/net v0.42.0
- golang.org/x/oauth2 v0.30.0
- golang.org/x/time v0.12.0
+ golang.org/x/net v0.48.0
+ golang.org/x/oauth2 v0.34.0
+ golang.org/x/time v0.14.0
google.golang.org/appengine v1.6.8
gopkg.in/ldap.v3 v3.1.0
gorm.io/driver/mysql v1.5.7
@@ -87,7 +89,7 @@ require (
)
require (
- cloud.google.com/go/compute/metadata v0.7.0 // indirect
+ cloud.google.com/go/compute/metadata v0.9.0 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.1 // indirect
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf // indirect
github.com/ProtonMail/gluon v0.17.1-0.20230724134000-308be39be96e // indirect
@@ -124,12 +126,12 @@ require (
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
go.uber.org/mock v0.5.0 // indirect
golang.org/x/exp v0.0.0-20250606033433-dcc06ee1d476 // indirect
- golang.org/x/mod v0.27.0 // indirect
+ golang.org/x/mod v0.30.0 // indirect
gopkg.in/go-jose/go-jose.v2 v2.6.3 // indirect
)
require (
- github.com/OpenListTeam/115-sdk-go v0.2.2
+ github.com/OpenListTeam/115-sdk-go v0.2.3
github.com/STARRY-S/zip v0.2.1 // indirect
github.com/aymerick/douceur v0.2.0 // indirect
github.com/blevesearch/go-faiss v1.0.25 // indirect
@@ -159,7 +161,7 @@ require (
github.com/taruti/bytepool v0.0.0-20160310082835-5e3a9ea56543 // indirect
github.com/ulikunitz/xz v0.5.12 // indirect
github.com/yuin/goldmark v1.7.13
- go4.org v0.0.0-20230225012048-214862532bf5
+ go4.org v0.0.0-20260112195520-a5071408f32f
resty.dev/v3 v3.0.0-beta.2 // indirect
)
@@ -285,14 +287,14 @@ require (
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.etcd.io/bbolt v1.4.0 // indirect
golang.org/x/arch v0.18.0 // indirect
- golang.org/x/sync v0.16.0
- golang.org/x/sys v0.34.0
- golang.org/x/term v0.33.0 // indirect
- golang.org/x/text v0.27.0
- golang.org/x/tools v0.35.0 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
- google.golang.org/grpc v1.73.0
- google.golang.org/protobuf v1.36.6 // indirect
+ golang.org/x/sync v0.19.0
+ golang.org/x/sys v0.40.0
+ golang.org/x/term v0.38.0 // indirect
+ golang.org/x/text v0.32.0
+ golang.org/x/tools v0.39.0 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b // indirect
+ google.golang.org/grpc v1.78.0
+ google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect
gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
diff --git a/go.sum b/go.sum
index 57964be4c..b9a4570bd 100644
--- a/go.sum
+++ b/go.sum
@@ -16,6 +16,7 @@ cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbf
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU=
cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo=
+cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
@@ -47,8 +48,8 @@ github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7Y
github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd h1:nzE1YQBdx1bq9IlZinHa+HVffy+NmVRoKr+wHN8fpLE=
github.com/Max-Sum/base32768 v0.0.0-20230304063302-18e6ce5945fd/go.mod h1:C8yoIfvESpM3GD07OCHU7fqI7lhwyZ2Td1rbNbTAhnc=
-github.com/OpenListTeam/115-sdk-go v0.2.2 h1:JCrGHqQjBX3laOA6Hw4CuBovSg7g+FC5s0LEAYsRciU=
-github.com/OpenListTeam/115-sdk-go v0.2.2/go.mod h1:cfvitk2lwe6036iNi2h+iNxwxWDifKZsSvNtrur5BqU=
+github.com/OpenListTeam/115-sdk-go v0.2.3 h1:nDNz0GxgliW+nT2Ds486k/rp/GgJj7Ngznc98ZBUwZo=
+github.com/OpenListTeam/115-sdk-go v0.2.3/go.mod h1:cfvitk2lwe6036iNi2h+iNxwxWDifKZsSvNtrur5BqU=
github.com/OpenListTeam/go-cache v0.1.0 h1:eV2+FCP+rt+E4OCJqLUW7wGccWZNJMV0NNkh+uChbAI=
github.com/OpenListTeam/go-cache v0.1.0/go.mod h1:AHWjKhNK3LE4rorVdKyEALDHoeMnP8SjiNyfVlB+Pz4=
github.com/OpenListTeam/gsync v0.1.0 h1:ywzGybOvA3lW8K1BUjKZ2IUlT2FSlzPO4DOazfYXjcs=
@@ -81,8 +82,8 @@ github.com/RoaringBitmap/roaring/v2 v2.4.5 h1:uGrrMreGjvAtTBobc0g5IrW1D5ldxDQYe2
github.com/RoaringBitmap/roaring/v2 v2.4.5/go.mod h1:FiJcsfkGje/nZBZgCu0ZxCPOKD/hVXDS2dXi7/eUFE0=
github.com/STARRY-S/zip v0.2.1 h1:pWBd4tuSGm3wtpoqRZZ2EAwOmcHK6XFf7bU9qcJXyFg=
github.com/STARRY-S/zip v0.2.1/go.mod h1:xNvshLODWtC4EJ702g7cTYn13G53o1+X9BWnPFpcWV4=
-github.com/SheltonZhu/115driver v1.1.1 h1:9EMhe2ZJflGiAaZbYInw2jqxTcqZNF+DtVDsEy70aFU=
-github.com/SheltonZhu/115driver v1.1.1/go.mod h1:rKvNd4Y4OkXv1TMbr/SKjGdcvMQxh6AW5Tw9w0CJb7E=
+github.com/SheltonZhu/115driver v1.2.3 h1:94XMP/ey7VXIlpoBLIJHEoXu7N8YsELZlXVbxWcDDvk=
+github.com/SheltonZhu/115driver v1.2.3/go.mod h1:Zk7Qz7SYO1QU0SJIne6DnUD2k36S3wx/KbsQpxcfY/Y=
github.com/abbot/go-http-auth v0.4.0 h1:QjmvZ5gSC7jm3Zg54DqWE/T5m1t2AfDu6QlXJT0EVT0=
github.com/abbot/go-http-auth v0.4.0/go.mod h1:Cz6ARTIzApMJDzh5bRMSUou6UMSp0IEXg9km/ci7TJM=
github.com/aead/ecdh v0.2.0 h1:pYop54xVaq/CEREFEcukHRZfTdjiWvYIsZDXXrBapQQ=
@@ -752,6 +753,8 @@ go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
go4.org v0.0.0-20230225012048-214862532bf5 h1:nifaUDeh+rPaBCMPMQHZmvJf+QdpLFnuQPwx+LxVmtc=
go4.org v0.0.0-20230225012048-214862532bf5/go.mod h1:F57wTi5Lrj6WLyswp5EYV1ncrEbFGHD4hhz6S1ZYeaU=
+go4.org v0.0.0-20260112195520-a5071408f32f h1:ziUVAjmTPwQMBmYR1tbdRFJPtTcQUI12fH9QQjfb0Sw=
+go4.org v0.0.0-20260112195520-a5071408f32f/go.mod h1:ZRJnO5ZI4zAwMFp+dS1+V6J6MSyAowhRqAE+DPa1Xp0=
gocv.io/x/gocv v0.25.0/go.mod h1:Rar2PS6DV+T4FL+PM535EImD/h13hGVaHhnCu1xarBs=
golang.org/x/arch v0.18.0 h1:WN9poc33zL4AzGxqf8VtpKUnGvMi8O9lhNyBMF/85qc=
golang.org/x/arch v0.18.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk=
@@ -770,6 +773,8 @@ golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
+golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
+golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -807,6 +812,8 @@ golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ=
golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc=
+golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
+golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -834,6 +841,8 @@ golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
+golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
+golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -841,6 +850,8 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
+golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=
+golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -855,6 +866,8 @@ golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
+golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -889,6 +902,8 @@ golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
+golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -901,6 +916,8 @@ golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg=
golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
+golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
+golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -917,10 +934,14 @@ golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
+golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
+golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
+golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
+golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -953,6 +974,8 @@ golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
+golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
+golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -988,6 +1011,8 @@ google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvx
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b h1:Mv8VFug0MP9e5vUxfBcE3vUkV6CImK3cMNMIDFjmzxU=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -997,10 +1022,14 @@ google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok=
google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc=
+google.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=
+google.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
+google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
+google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/internal/bootstrap/config.go b/internal/bootstrap/config.go
index 116b4cd35..74e218f8f 100644
--- a/internal/bootstrap/config.go
+++ b/internal/bootstrap/config.go
@@ -54,6 +54,7 @@ func InitConfig() {
}
}
configPath = filepath.Clean(configPath)
+ conf.ConfigPath = configPath
log.Infof("reading config file: %s", configPath)
if !utils.Exists(configPath) {
log.Infof("config file not exists, creating default config file")
diff --git a/internal/bootstrap/patch/all.go b/internal/bootstrap/patch/all.go
index c4a72a966..5d4c814dc 100644
--- a/internal/bootstrap/patch/all.go
+++ b/internal/bootstrap/patch/all.go
@@ -44,6 +44,7 @@ var UpgradePatches = []VersionPatches{
Version: "v4.1.9",
Patches: []func(){
v4_1_9.EnableWebDavProxy,
+ v4_1_9.ResetSkipTlsVerify,
},
},
}
diff --git a/internal/bootstrap/patch/v4_1_9/skip_tls.go b/internal/bootstrap/patch/v4_1_9/skip_tls.go
new file mode 100644
index 000000000..1d9858d95
--- /dev/null
+++ b/internal/bootstrap/patch/v4_1_9/skip_tls.go
@@ -0,0 +1,38 @@
+package v4_1_9
+
+import (
+	"os"
+	"strings"
+
+	"github.com/OpenListTeam/OpenList/v4/internal/conf"
+	"github.com/OpenListTeam/OpenList/v4/pkg/utils"
+)
+
+// ResetSkipTlsVerify is a one-time upgrade patch for v4.1.9: earlier default
+// configs shipped with tls_insecure_skip_verify=true, which silently disables
+// TLS certificate validation for outbound requests. If the flag is still set
+// on a tagged release build, force it back to false and persist the config.
+func ResetSkipTlsVerify() {
+	if !conf.Conf.TlsInsecureSkipVerify {
+		return
+	}
+	// Only auto-reset on release builds (version tag starts with "v");
+	// leave dev/source builds untouched so local overrides survive.
+	if !strings.HasPrefix(conf.Version, "v") {
+		return
+	}
+
+	conf.Conf.TlsInsecureSkipVerify = false
+
+	confBody, err := utils.Json.MarshalIndent(conf.Conf, "", " ")
+	if err != nil {
+		utils.Log.Errorf("[ResetSkipTlsVerify] failed to rewrite config: marshal config error: %+v", err)
+		return
+	}
+	// 0o600: the config file can contain credentials; it must never be
+	// world-writable (the previous 0o777 also marked it executable).
+	err = os.WriteFile(conf.ConfigPath, confBody, 0o600)
+	if err != nil {
+		utils.Log.Errorf("[ResetSkipTlsVerify] failed to rewrite config: write config file error: %+v", err)
+		return
+	}
+	utils.Log.Infof("[ResetSkipTlsVerify] succeeded to set tls_insecure_skip_verify to false")
+}
diff --git a/internal/conf/config.go b/internal/conf/config.go
index c5ace8005..f347380d8 100644
--- a/internal/conf/config.go
+++ b/internal/conf/config.go
@@ -182,7 +182,7 @@ func DefaultConfig(dataDir string) *Config {
MmapThreshold: 4,
MaxConnections: 0,
MaxConcurrency: 64,
- TlsInsecureSkipVerify: true,
+ TlsInsecureSkipVerify: false,
Tasks: TasksConfig{
Download: TaskConfig{
Workers: 5,
diff --git a/internal/conf/var.go b/internal/conf/var.go
index 9a02eca26..972f69997 100644
--- a/internal/conf/var.go
+++ b/internal/conf/var.go
@@ -15,8 +15,9 @@ var (
)
var (
- Conf *Config
- URL *url.URL
+ Conf *Config
+ URL *url.URL
+ ConfigPath string
)
var SlicesMap = make(map[string][]string)
diff --git a/internal/fs/copy_move.go b/internal/fs/copy_move.go
index e78fc9be8..77c2015b5 100644
--- a/internal/fs/copy_move.go
+++ b/internal/fs/copy_move.go
@@ -17,6 +17,7 @@ import (
"github.com/OpenListTeam/OpenList/v4/server/common"
"github.com/OpenListTeam/tache"
"github.com/pkg/errors"
+ log "github.com/sirupsen/logrus"
)
type taskType uint8
@@ -192,6 +193,21 @@ func (t *FileTransferTask) RunWithNextTaskCallback(f func(nextTask *FileTransfer
dstActualPath := stdpath.Join(t.DstActualPath, srcObj.GetName())
task_group.TransferCoordinator.AppendPayload(t.groupID, task_group.DstPathToHook(dstActualPath))
+ // Pre-create the destination directory first
+ t.Status = "ensuring destination directory exists"
+ if err := op.MakeDir(t.Ctx(), t.DstStorage, dstActualPath); err != nil {
+ log.Warnf("[copy_move] failed to ensure destination dir [%s]: %v, will continue", dstActualPath, err)
+ // Continue anyway - the directory might exist but Get failed due to cache issues
+ }
+
+ // Pre-create subdirectories (up to 1 level deep) to avoid deep recursion issues
+ // Balances between reducing API calls and maintaining fault tolerance
+ t.Status = "pre-creating subdirectories"
+ if err := t.preCreateDirectoryTree(objs, dstActualPath, 1); err != nil {
+ log.Warnf("[copy_move] failed to pre-create directory tree: %v, will continue", err)
+ // Continue anyway - individual directories will be created on-demand
+ }
+
existedObjs := make(map[string]bool)
if t.TaskType == merge {
dstObjs, err := op.List(t.Ctx(), t.DstStorage, dstActualPath, model.ListArgs{})
@@ -263,6 +279,58 @@ func (t *FileTransferTask) RunWithNextTaskCallback(f func(nextTask *FileTransfer
return op.Put(context.WithValue(t.Ctx(), conf.SkipHookKey, struct{}{}), t.DstStorage, t.DstActualPath, ss, t.SetProgress)
}
+// preCreateDirectoryTree recursively scans source directory tree and pre-creates
+// directories on destination up to maxDepth levels to avoid deep MakeDir recursion issues.
+// maxDepth=0 means only current level, maxDepth=1 means current+1 level, etc.
+func (t *FileTransferTask) preCreateDirectoryTree(objs []model.Obj, dstBasePath string, maxDepth int) error {
+ // First pass: create immediate subdirectories
+ var subdirs []model.Obj
+ for _, obj := range objs {
+ // Check for cancellation
+ if err := t.Ctx().Err(); err != nil {
+ return err
+ }
+
+ if obj.IsDir() {
+ subdirPath := stdpath.Join(dstBasePath, obj.GetName())
+ if err := op.MakeDir(t.Ctx(), t.DstStorage, subdirPath); err != nil {
+ log.Debugf("[copy_move] failed to pre-create dir [%s]: %v", subdirPath, err)
+ // Continue with other directories
+ }
+ subdirs = append(subdirs, obj)
+ }
+ }
+
+ // Stop recursion if max depth reached
+ if maxDepth <= 0 {
+ return nil
+ }
+
+ // Second pass: recursively scan and create nested subdirectories
+ for _, subdir := range subdirs {
+ if err := t.Ctx().Err(); err != nil {
+ return err
+ }
+
+ // List contents of this subdirectory
+ subdirSrcPath := stdpath.Join(t.SrcActualPath, subdir.GetName())
+ subdirDstPath := stdpath.Join(dstBasePath, subdir.GetName())
+
+ subObjs, err := op.List(t.Ctx(), t.SrcStorage, subdirSrcPath, model.ListArgs{})
+ if err != nil {
+ log.Debugf("[copy_move] failed to list subdir [%s] for pre-creation: %v", subdirSrcPath, err)
+ continue // Skip this subdirectory, will handle when processing
+ }
+
+ // Recursively create subdirectories with decreased depth
+ if err := t.preCreateDirectoryTree(subObjs, subdirDstPath, maxDepth-1); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
var (
CopyTaskManager *tache.Manager[*FileTransferTask]
MoveTaskManager *tache.Manager[*FileTransferTask]
diff --git a/internal/model/args.go b/internal/model/args.go
index 073c94a63..d165908fb 100644
--- a/internal/model/args.go
+++ b/internal/model/args.go
@@ -25,6 +25,10 @@ type LinkArgs struct {
Redirect bool
}
+// LinkRefresher is a callback function type for refreshing download links
+// It returns a new Link and the associated object, or an error
+type LinkRefresher func(ctx context.Context) (*Link, Obj, error)
+
type Link struct {
URL string `json:"url"` // most common way
Header http.Header `json:"header"` // needed header (for url)
@@ -37,6 +41,10 @@ type Link struct {
PartSize int `json:"part_size"`
ContentLength int64 `json:"content_length"` // 转码视频、缩略图
+ // Refresher is a callback to refresh the link when it expires during long downloads
+ // This field is not serialized and is optional - if nil, no refresh will be attempted
+ Refresher LinkRefresher `json:"-"`
+
utils.SyncClosers `json:"-"`
// 如果SyncClosers中的资源被关闭后Link将不可用,则此值应为 true
RequireReference bool `json:"-"`
diff --git a/internal/net/serve.go b/internal/net/serve.go
index 6a20460b1..ee288b86a 100644
--- a/internal/net/serve.go
+++ b/internal/net/serve.go
@@ -7,6 +7,7 @@ import (
"fmt"
"io"
"mime/multipart"
+ stdnet "net" // 标准库net包,用于Dialer
"net/http"
"strconv"
"strings"
@@ -286,12 +287,20 @@ func NewHttpClient() *http.Client {
transport := &http.Transport{
Proxy: http.ProxyFromEnvironment,
TLSClientConfig: &tls.Config{InsecureSkipVerify: conf.Conf.TlsInsecureSkipVerify},
+ // 快速连接超时:10秒建立连接,失败快速重试
+ DialContext: (&stdnet.Dialer{
+ Timeout: 10 * time.Second, // TCP握手超时
+ KeepAlive: 30 * time.Second, // TCP keep-alive
+ }).DialContext,
+ // 响应头超时:15秒等待服务器响应头(平衡API调用与下载检测)
+ ResponseHeaderTimeout: 15 * time.Second,
+ // 允许长时间读取数据(无 IdleConnTimeout 限制)
}
SetProxyIfConfigured(transport)
return &http.Client{
- Timeout: time.Hour * 48,
+ Timeout: time.Hour * 48, // 总超时保持48小时(允许大文件慢速下载)
Transport: transport,
}
}
diff --git a/internal/offline_download/115_open/client.go b/internal/offline_download/115_open/client.go
index d12e02ec5..e8d33326b 100644
--- a/internal/offline_download/115_open/client.go
+++ b/internal/offline_download/115_open/client.go
@@ -3,6 +3,7 @@ package _115_open
import (
"context"
"fmt"
+ "strings"
_115_open "github.com/OpenListTeam/OpenList/v4/drivers/115_open"
"github.com/OpenListTeam/OpenList/v4/internal/conf"
@@ -70,10 +71,54 @@ func (o *Open115) AddURL(args *tool.AddUrlArgs) (string, error) {
}
hashs, err := driver115Open.OfflineDownload(ctx, []string{args.Url}, parentDir)
- if err != nil || len(hashs) < 1 {
+
+ // 检查是否是重复链接错误 (code: 10008)
+ if err != nil {
+ // 尝试从错误信息中判断是否是重复链接
+ errStr := err.Error()
+ isDuplicateError := false
+
+ // 检查是否包含"重复"、"已存在"等关键词,或错误码10008
+ if strings.Contains(errStr, "10008") ||
+ strings.Contains(errStr, "重复") ||
+ strings.Contains(errStr, "已存在") ||
+ strings.Contains(errStr, "duplicate") {
+ isDuplicateError = true
+ }
+
+ if isDuplicateError {
+ // 尝试查找并删除已存在的相同URL任务,然后重试
+ taskList, listErr := driver115Open.OfflineList(ctx)
+ if listErr == nil && taskList != nil {
+ // 查找匹配的任务
+ for _, task := range taskList.Tasks {
+ if task.URL == args.Url {
+ // 找到重复任务,删除它
+ deleteErr := driver115Open.DeleteOfflineTask(ctx, task.InfoHash, false)
+ if deleteErr == nil {
+ // 删除成功,重新尝试添加
+ hashs, err = driver115Open.OfflineDownload(ctx, []string{args.Url}, parentDir)
+ if err != nil {
+ return "", fmt.Errorf("failed to add offline download task after removing duplicate: %w", err)
+ }
+ if len(hashs) > 0 {
+ return hashs[0], nil
+ }
+ }
+ break
+ }
+ }
+ }
+ }
+
+ // 如果不是重复错误或处理失败,返回原始错误
return "", fmt.Errorf("failed to add offline download task: %w", err)
}
+ if len(hashs) < 1 {
+ return "", fmt.Errorf("failed to add offline download task: no task hash returned")
+ }
+
return hashs[0], nil
}
@@ -129,8 +174,8 @@ func (o *Open115) Status(task *tool.DownloadTask) (*tool.Status, error) {
return s, nil
}
}
- s.Err = fmt.Errorf("the task has been deleted")
- return nil, nil
+ // 任务不在列表中,可能已完成或被删除
+ return nil, fmt.Errorf("task %s not found in offline list", task.GID)
}
var _ tool.Tool = (*Open115)(nil)
diff --git a/internal/offline_download/115_open/client_test.go b/internal/offline_download/115_open/client_test.go
new file mode 100644
index 000000000..b675e65fe
--- /dev/null
+++ b/internal/offline_download/115_open/client_test.go
@@ -0,0 +1,248 @@
+package _115_open
+
+import (
+ "context"
+ "fmt"
+ "strings"
+ "testing"
+
+ sdk "github.com/OpenListTeam/115-sdk-go"
+ _115_open "github.com/OpenListTeam/OpenList/v4/drivers/115_open"
+ "github.com/OpenListTeam/OpenList/v4/internal/model"
+ "github.com/OpenListTeam/OpenList/v4/internal/offline_download/tool"
+)
+
+// Mock implementation of Open115 driver for testing
+type mockOpen115 struct {
+ _115_open.Open115
+ offlineDownloadFunc func(ctx context.Context, uris []string, dstDir model.Obj) ([]string, error)
+ offlineListFunc func(ctx context.Context) (*sdk.OfflineTaskListResp, error)
+ deleteOfflineFunc func(ctx context.Context, infoHash string, deleteFiles bool) error
+}
+
+func (m *mockOpen115) OfflineDownload(ctx context.Context, uris []string, dstDir model.Obj) ([]string, error) {
+ if m.offlineDownloadFunc != nil {
+ return m.offlineDownloadFunc(ctx, uris, dstDir)
+ }
+ return nil, fmt.Errorf("not implemented")
+}
+
+func (m *mockOpen115) OfflineList(ctx context.Context) (*sdk.OfflineTaskListResp, error) {
+ if m.offlineListFunc != nil {
+ return m.offlineListFunc(ctx)
+ }
+ return nil, fmt.Errorf("not implemented")
+}
+
+func (m *mockOpen115) DeleteOfflineTask(ctx context.Context, infoHash string, deleteFiles bool) error {
+ if m.deleteOfflineFunc != nil {
+ return m.deleteOfflineFunc(ctx, infoHash, deleteFiles)
+ }
+ return fmt.Errorf("not implemented")
+}
+
+// TestAddURL_Success tests successful URL addition
+func TestAddURL_Success(t *testing.T) {
+ t.Skip("需要真实的storage环境,跳过此测试")
+}
+
+// TestAddURL_DuplicateHandling tests the duplicate URL handling logic
+func TestAddURL_DuplicateHandling(t *testing.T) {
+ t.Skip("需要真实的storage环境,跳过此测试")
+}
+
+// TestDuplicateLinkRetryLogic tests the logic without actual API calls
+func TestDuplicateLinkRetryLogic(t *testing.T) {
+ testURL := "https://example.com/test.torrent"
+ testHash := "test_hash_123"
+
+ t.Run("首次添加成功", func(t *testing.T) {
+ // 模拟首次添加成功的场景
+ callCount := 0
+ mock := &mockOpen115{
+ offlineDownloadFunc: func(ctx context.Context, uris []string, dstDir model.Obj) ([]string, error) {
+ callCount++
+ if callCount == 1 {
+ return []string{testHash}, nil
+ }
+ return nil, fmt.Errorf("unexpected call")
+ },
+ }
+
+ hashes, err := mock.OfflineDownload(context.Background(), []string{testURL}, nil)
+ if err != nil {
+ t.Errorf("首次添加失败: %v", err)
+ }
+ if len(hashes) != 1 || hashes[0] != testHash {
+ t.Errorf("期望hash=%s, 实际=%v", testHash, hashes)
+ }
+ if callCount != 1 {
+ t.Errorf("期望调用1次, 实际调用%d次", callCount)
+ }
+ })
+
+ t.Run("检测到重复错误并自动删除重试", func(t *testing.T) {
+ // 模拟重复链接错误的场景
+ callCount := 0
+ deleteCount := 0
+
+ mock := &mockOpen115{
+ offlineDownloadFunc: func(ctx context.Context, uris []string, dstDir model.Obj) ([]string, error) {
+ callCount++
+ if callCount == 1 {
+ // 首次调用返回重复错误
+ return nil, fmt.Errorf("code: 10008, message: 任务已存在,请勿输入重复的链接地址")
+ } else if callCount == 2 {
+ // 删除后重试,返回成功
+ return []string{testHash}, nil
+ }
+ return nil, fmt.Errorf("unexpected call count: %d", callCount)
+ },
+ offlineListFunc: func(ctx context.Context) (*sdk.OfflineTaskListResp, error) {
+ // 返回包含重复任务的列表
+ return &sdk.OfflineTaskListResp{
+ Tasks: []sdk.OfflineTask{
+ {
+ InfoHash: "old_hash_456",
+ URL: testURL,
+ Status: 1, // 下载中
+ },
+ },
+ }, nil
+ },
+ deleteOfflineFunc: func(ctx context.Context, infoHash string, deleteFiles bool) error {
+ deleteCount++
+ if infoHash != "old_hash_456" {
+ t.Errorf("期望删除hash=old_hash_456, 实际=%s", infoHash)
+ }
+ if deleteFiles {
+ t.Error("不应该删除源文件")
+ }
+ return nil
+ },
+ }
+
+ // 模拟完整的错误处理逻辑
+ ctx := context.Background()
+
+ // 第一次调用返回重复错误
+ _, err := mock.OfflineDownload(ctx, []string{testURL}, nil)
+ if err == nil {
+ t.Error("第一次应该返回错误")
+ }
+
+ // 检查是否是重复错误
+ errStr := err.Error()
+ if !strings.Contains(errStr, "10008") && !strings.Contains(errStr, "重复") {
+ t.Errorf("应该是重复错误,实际错误: %v", err)
+ }
+
+ // 获取任务列表
+ taskList, err := mock.OfflineList(ctx)
+ if err != nil {
+ t.Errorf("获取任务列表失败: %v", err)
+ }
+
+ // 查找并删除重复任务
+ found := false
+ for _, task := range taskList.Tasks {
+ if task.URL == testURL {
+ err := mock.DeleteOfflineTask(ctx, task.InfoHash, false)
+ if err != nil {
+ t.Errorf("删除任务失败: %v", err)
+ }
+ found = true
+ break
+ }
+ }
+
+ if !found {
+ t.Error("未找到重复任务")
+ }
+
+ // 重试添加
+ hashes, err := mock.OfflineDownload(ctx, []string{testURL}, nil)
+ if err != nil {
+ t.Errorf("重试添加失败: %v", err)
+ }
+ if len(hashes) != 1 || hashes[0] != testHash {
+ t.Errorf("期望hash=%s, 实际=%v", testHash, hashes)
+ }
+
+ if callCount != 2 {
+ t.Errorf("期望调用OfflineDownload 2次, 实际%d次", callCount)
+ }
+ if deleteCount != 1 {
+ t.Errorf("期望调用DeleteOfflineTask 1次, 实际%d次", deleteCount)
+ }
+ })
+
+ t.Run("重复链接但删除失败", func(t *testing.T) {
+ mock := &mockOpen115{
+ offlineDownloadFunc: func(ctx context.Context, uris []string, dstDir model.Obj) ([]string, error) {
+ // 始终返回重复错误
+ return nil, fmt.Errorf("code: 10008, message: 任务已存在")
+ },
+ offlineListFunc: func(ctx context.Context) (*sdk.OfflineTaskListResp, error) {
+ return &sdk.OfflineTaskListResp{
+ Tasks: []sdk.OfflineTask{
+ {
+ InfoHash: "old_hash_789",
+ URL: testURL,
+ },
+ },
+ }, nil
+ },
+ deleteOfflineFunc: func(ctx context.Context, infoHash string, deleteFiles bool) error {
+ return fmt.Errorf("删除失败:权限不足")
+ },
+ }
+
+ // 删除失败时应该返回错误
+ ctx := context.Background()
+ _, err := mock.OfflineDownload(ctx, []string{testURL}, nil)
+ if err == nil {
+ t.Error("应该返回错误")
+ }
+ })
+}
+
+// TestOpen115_Name tests the Name method
+func TestOpen115_Name(t *testing.T) {
+ o := &Open115{}
+ name := o.Name()
+ expected := "115 Open"
+ if name != expected {
+ t.Errorf("期望名称=%s, 实际=%s", expected, name)
+ }
+}
+
+// TestOpen115_Items tests the Items method
+func TestOpen115_Items(t *testing.T) {
+ o := &Open115{}
+ items := o.Items()
+ if items != nil {
+ t.Error("Items应该返回nil")
+ }
+}
+
+// TestOpen115_Run tests the Run method
+func TestOpen115_Run(t *testing.T) {
+ o := &Open115{}
+ err := o.Run(&tool.DownloadTask{})
+ if err == nil {
+ t.Error("Run应该返回NotSupport错误")
+ }
+}
+
+// TestOpen115_Init tests the Init method
+func TestOpen115_Init(t *testing.T) {
+ o := &Open115{}
+ msg, err := o.Init()
+ if err != nil {
+ t.Errorf("Init失败: %v", err)
+ }
+ if msg != "ok" {
+ t.Errorf("期望消息='ok', 实际=%s", msg)
+ }
+}
diff --git a/internal/offline_download/tool/download.go b/internal/offline_download/tool/download.go
index 50a4f6343..e033cccba 100644
--- a/internal/offline_download/tool/download.go
+++ b/internal/offline_download/tool/download.go
@@ -147,11 +147,11 @@ func (t *DownloadTask) Update() (bool, error) {
if err != nil {
t.callStatusRetried++
log.Errorf("failed to get status of %s, retried %d times", t.ID, t.callStatusRetried)
+ if t.callStatusRetried > 10 {
+ return true, errors.Errorf("failed to get status of %s, retried %d times", t.ID, t.callStatusRetried)
+ }
return false, nil
}
- if t.callStatusRetried > 5 {
- return true, errors.Errorf("failed to get status of %s, retried %d times", t.ID, t.callStatusRetried)
- }
t.callStatusRetried = 0
t.SetProgress(info.Progress)
t.SetTotalBytes(info.TotalBytes)
diff --git a/internal/op/fs.go b/internal/op/fs.go
index 5116bbef5..534e73697 100644
--- a/internal/op/fs.go
+++ b/internal/op/fs.go
@@ -150,6 +150,7 @@ func Get(ctx context.Context, storage driver.Driver, path string, excludeTempObj
Modified: storage.GetStorage().Modified,
IsFolder: true,
Mask: model.Locked,
+ HashInfo: utils.NewHashInfo(nil, ""),
}, nil
case driver.IRootPath:
return &model.Object{
@@ -158,6 +159,7 @@ func Get(ctx context.Context, storage driver.Driver, path string, excludeTempObj
Modified: storage.GetStorage().Modified,
Mask: model.Locked,
IsFolder: true,
+ HashInfo: utils.NewHashInfo(nil, ""),
}, nil
}
return nil, errors.New("please implement GetRooter or IRootPath or IRootId interface")
@@ -247,6 +249,8 @@ func Link(ctx context.Context, storage driver.Driver, path string, args model.Li
ol.link.SyncClosers.AcquireReference() || !ol.link.RequireReference {
return ol.link, ol.obj, nil
}
+ // SyncClosers 已关闭(文件句柄已关闭),删除缓存条目,重新获取
+ Cache.linkCache.DeleteKey(key)
}
fn := func() (*objWithLink, error) {
@@ -262,11 +266,37 @@ func Link(ctx context.Context, storage driver.Driver, path string, args model.Li
if err != nil {
return nil, errors.Wrapf(err, "failed get link")
}
+
+ // Set up link refresher for automatic refresh on expiry during long downloads
+ // This enables all download scenarios to handle link expiration gracefully
+ if link.Refresher == nil {
+ storageCopy := storage
+ pathCopy := path
+ argsCopy := args
+ link.Refresher = func(refreshCtx context.Context) (*model.Link, model.Obj, error) {
+ log.Infof("Refreshing download link for: %s", pathCopy)
+ // Get fresh link directly from storage, bypassing cache
+ file, err := GetUnwrap(refreshCtx, storageCopy, pathCopy)
+ if err != nil {
+ return nil, nil, errors.WithMessage(err, "failed to get file for refresh")
+ }
+ newLink, err := storageCopy.Link(refreshCtx, file, argsCopy)
+ if err != nil {
+ return nil, nil, errors.Wrapf(err, "failed to refresh link")
+ }
+ return newLink, file, nil
+ }
+ }
+
ol := &objWithLink{link: link, obj: file}
if link.Expiration != nil {
Cache.linkCache.SetTypeWithTTL(key, typeKey, ol, *link.Expiration)
- } else {
+ } else if link.RequireReference {
+ // 本地文件等需要引用计数的链接,缓存与文件句柄生命周期绑定
Cache.linkCache.SetTypeWithExpirable(key, typeKey, ol, &link.SyncClosers)
+ } else {
+ // 不需要引用计数(如云盘链接无过期时间),使用默认 TTL,多客户端复用
+ Cache.linkCache.SetType(key, typeKey, ol)
}
return ol, nil
}
@@ -326,9 +356,16 @@ func MakeDir(ctx context.Context, storage driver.Driver, path string) error {
return nil, errors.WithMessagef(err, "failed to make parent dir [%s]", parentPath)
}
parentDir, err := GetUnwrap(ctx, storage, parentPath)
- // this should not happen
if err != nil {
- return nil, errors.WithMessagef(err, "failed to get parent dir [%s]", parentPath)
+ if errs.IsObjectNotFound(err) {
+ // Retry once after a short delay (handles cloud storage API sync delay)
+ log.Debugf("[op] parent dir [%s] not found immediately after creation, retrying...", parentPath)
+ time.Sleep(100 * time.Millisecond)
+ parentDir, err = GetUnwrap(ctx, storage, parentPath)
+ }
+ if err != nil {
+ return nil, errors.WithMessagef(err, "failed to get parent dir [%s]", parentPath)
+ }
}
if model.ObjHasMask(parentDir, model.NoWrite) {
return nil, errors.WithStack(errs.PermissionDenied)
@@ -358,6 +395,7 @@ func MakeDir(ctx context.Context, storage driver.Driver, path string) error {
Modified: t,
Ctime: t,
Mask: model.Temp,
+ HashInfo: utils.NewHashInfo(nil, ""),
}
}
dirCache.UpdateObject("", wrapObjName(storage, newObj))
@@ -682,6 +720,7 @@ func Put(ctx context.Context, storage driver.Driver, dstDirPath string, file mod
Modified: file.ModTime(),
Ctime: file.CreateTime(),
Mask: model.Temp,
+ HashInfo: utils.NewHashInfo(nil, ""),
}
}
newObj = wrapObjName(storage, newObj)
@@ -750,6 +789,7 @@ func PutURL(ctx context.Context, storage driver.Driver, dstDirPath, dstName, url
Modified: t,
Ctime: t,
Mask: model.Temp,
+ HashInfo: utils.NewHashInfo(nil, ""),
}
}
newObj = wrapObjName(storage, newObj)
diff --git a/internal/op/storage.go b/internal/op/storage.go
index da4c84e31..2e93bf569 100644
--- a/internal/op/storage.go
+++ b/internal/op/storage.go
@@ -368,7 +368,9 @@ func GetStorageVirtualFilesWithDetailsByPath(ctx context.Context, prefix string,
}(d)
select {
case r := <-resultChan:
- ret.StorageDetails = r
+ if r != nil {
+ ret.StorageDetails = r
+ }
case <-time.After(time.Second):
}
return ret
@@ -419,6 +421,7 @@ func getStorageVirtualFilesByPath(prefix string, rootCallback func(driver.Driver
Name: name,
Modified: v.GetStorage().Modified,
IsFolder: true,
+ HashInfo: utils.NewHashInfo(nil, ""),
}
if !found {
idx := len(files)
diff --git a/internal/stream/stream.go b/internal/stream/stream.go
index 4c8238100..7eec75dd9 100644
--- a/internal/stream/stream.go
+++ b/internal/stream/stream.go
@@ -211,7 +211,9 @@ func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
return io.NewSectionReader(f.GetFile(), httpRange.Start, httpRange.Length), nil
}
- cache, err := f.cache(httpRange.Start + httpRange.Length)
+ // 限制缓存大小,避免累积缓存整个文件
+ maxCache := min(httpRange.Start+httpRange.Length, int64(conf.MaxBufferLimit))
+ cache, err := f.cache(maxCache)
if err != nil {
return nil, err
}
@@ -224,31 +226,13 @@ func (f *FileStream) RangeRead(httpRange http_range.Range) (io.Reader, error) {
// 即使被写入的数据量与Buffer.Cap一致,Buffer也会扩大
// 确保指定大小的数据被缓存
+// 注意:此方法只缓存到 maxCacheSize,不会缓存整个文件
func (f *FileStream) cache(maxCacheSize int64) (model.File, error) {
+ // 限制缓存大小,避免超大文件占用过多资源
+ // 如果需要缓存整个文件,应该显式调用 CacheFullAndWriter
if maxCacheSize > int64(conf.MaxBufferLimit) {
- size := f.GetSize()
- reader := f.Reader
- if f.peekBuff != nil {
- size -= f.peekBuff.Size()
- reader = f.oriReader
- }
- tmpF, err := utils.CreateTempFile(reader, size)
- if err != nil {
- return nil, err
- }
- f.Add(utils.CloseFunc(func() error {
- return errors.Join(tmpF.Close(), os.RemoveAll(tmpF.Name()))
- }))
- if f.peekBuff != nil {
- peekF, err := buffer.NewPeekFile(f.peekBuff, tmpF)
- if err != nil {
- return nil, err
- }
- f.Reader = peekF
- return peekF, nil
- }
- f.Reader = tmpF
- return tmpF, nil
+ // 不再创建整个文件的临时文件,只缓存到 MaxBufferLimit
+ maxCacheSize = int64(conf.MaxBufferLimit)
}
if f.peekBuff == nil {
@@ -315,15 +299,9 @@ func NewSeekableStream(fs *FileStream, link *model.Link) (*SeekableStream, error
if err != nil {
return nil, err
}
- if _, ok := rr.(*model.FileRangeReader); ok {
- var rc io.ReadCloser
- rc, err = rr.RangeRead(fs.Ctx, http_range.Range{Length: -1})
- if err != nil {
- return nil, err
- }
- fs.Reader = rc
- fs.Add(rc)
- }
+ // IMPORTANT: Do NOT create Reader early for FileRangeReader!
+ // Let generateReader() create it on-demand when actually needed for reading
+ // This prevents the Reader from being consumed by intermediate operations like hash calculation
fs.size = size
fs.Add(link)
return &SeekableStream{FileStream: fs, rangeReader: rr}, nil
diff --git a/internal/stream/util.go b/internal/stream/util.go
index 6aa3dda5d..d54776399 100644
--- a/internal/stream/util.go
+++ b/internal/stream/util.go
@@ -9,6 +9,9 @@ import (
"io"
"net/http"
"os"
+ "strings"
+ "sync"
+ "time"
"github.com/OpenListTeam/OpenList/v4/internal/conf"
"github.com/OpenListTeam/OpenList/v4/internal/errs"
@@ -21,13 +24,247 @@ import (
log "github.com/sirupsen/logrus"
)
+const (
+ // 链接刷新相关常量
+ MAX_LINK_REFRESH_COUNT = 50 // 下载链接最大刷新次数(支持长时间传输)
+
+ // RangeRead 重试相关常量
+ MAX_RANGE_READ_RETRY_COUNT = 5 // RangeRead 最大重试次数(从3增加到5)
+)
+
type RangeReaderFunc func(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error)
func (f RangeReaderFunc) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
return f(ctx, httpRange)
}
+// IsLinkExpiredError checks if the error indicates an expired download link
+func IsLinkExpiredError(err error) bool {
+ if err == nil {
+ return false
+ }
+
+ // Don't treat context cancellation as link expiration
+ // This happens when user pauses/seeks video or cancels download
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return false
+ }
+
+ errStr := strings.ToLower(err.Error())
+
+ // Common expired link error keywords
+ expiredKeywords := []string{
+ "expired", "invalid signature", "token expired",
+ "access denied", "forbidden", "unauthorized",
+ "link has expired", "url expired", "request has expired",
+ "signature expired", "accessdenied", "invalidtoken",
+ }
+ for _, keyword := range expiredKeywords {
+ if strings.Contains(errStr, keyword) {
+ return true
+ }
+ }
+
+ // Check for HTTP status codes that typically indicate expired links
+ if statusErr, ok := errs.UnwrapOrSelf(err).(net.HttpStatusCodeError); ok {
+ code := int(statusErr)
+		// NOTE(review): treating ALL 4xx responses as "expired link" is a broad heuristic —
+		// 400/404 are often permanent errors for which a refresh cannot help; confirm this is intended
+ if code >= 400 && code < 500 {
+ return true
+ }
+ }
+
+ return false
+}
+
+// RefreshableRangeReader wraps a RangeReader with link refresh capability
+type RefreshableRangeReader struct {
+ link *model.Link
+ size int64
+ innerReader model.RangeReaderIF
+ mu sync.Mutex
+ refreshCount int // track refresh count to avoid infinite loops
+}
+
+// NewRefreshableRangeReader creates a new RefreshableRangeReader
+func NewRefreshableRangeReader(link *model.Link, size int64) *RefreshableRangeReader {
+ return &RefreshableRangeReader{
+ link: link,
+ size: size,
+ }
+}
+
+func (r *RefreshableRangeReader) getInnerReader() (model.RangeReaderIF, error) {
+ if r.innerReader != nil {
+ return r.innerReader, nil
+ }
+
+ // Create inner reader without Refresher to avoid recursion
+ linkCopy := *r.link
+ linkCopy.Refresher = nil
+
+ reader, err := GetRangeReaderFromLink(r.size, &linkCopy)
+ if err != nil {
+ return nil, err
+ }
+ r.innerReader = reader
+ return reader, nil
+}
+
+func (r *RefreshableRangeReader) RangeRead(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
+ r.mu.Lock()
+ reader, err := r.getInnerReader()
+ r.mu.Unlock()
+ if err != nil {
+ return nil, err
+ }
+
+ rc, err := reader.RangeRead(ctx, httpRange)
+ if err != nil {
+ // Check if we should try to refresh on initial connection error
+ if IsLinkExpiredError(err) && r.link.Refresher != nil {
+ rc, err = r.refreshAndRetry(ctx, httpRange)
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ // Wrap the ReadCloser with self-healing capability to detect 0-byte reads
+ // This handles cases where cloud providers return 200 OK but empty body for expired links
+ return &selfHealingReadCloser{
+ ReadCloser: rc,
+ refresher: r,
+ ctx: ctx,
+ httpRange: httpRange,
+ firstRead: false,
+ closed: false,
+ }, nil
+}
+
+func (r *RefreshableRangeReader) refreshAndRetry(ctx context.Context, httpRange http_range.Range) (io.ReadCloser, error) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ if err := r.doRefreshLocked(ctx); err != nil {
+ return nil, err
+ }
+
+ reader, err := r.getInnerReader()
+ if err != nil {
+ return nil, err
+ }
+ return reader.RangeRead(ctx, httpRange)
+}
+
+// doRefreshLocked 执行实际的刷新逻辑(需要持有锁)
+func (r *RefreshableRangeReader) doRefreshLocked(ctx context.Context) error {
+ if r.refreshCount >= MAX_LINK_REFRESH_COUNT {
+ return fmt.Errorf("max refresh attempts (%d) reached", MAX_LINK_REFRESH_COUNT)
+ }
+
+ log.Infof("Link expired, attempting to refresh...")
+ // Use independent context for refresh to prevent cancellation from affecting link refresh
+ refreshCtx := context.WithoutCancel(ctx)
+ newLink, _, refreshErr := r.link.Refresher(refreshCtx)
+ if refreshErr != nil {
+ return fmt.Errorf("failed to refresh link: %w", refreshErr)
+ }
+
+ newLink.Refresher = r.link.Refresher
+ r.link = newLink
+ r.innerReader = nil
+ r.refreshCount++
+
+ log.Infof("Link refreshed successfully")
+ return nil
+}
+
+// selfHealingReadCloser wraps an io.ReadCloser and automatically refreshes the link
+// if it detects 0-byte reads (common with expired links from some cloud providers)
+type selfHealingReadCloser struct {
+ io.ReadCloser
+ refresher *RefreshableRangeReader
+ ctx context.Context
+ httpRange http_range.Range
+ firstRead bool
+ closed bool
+ mu sync.Mutex
+}
+
+func (s *selfHealingReadCloser) Read(p []byte) (n int, err error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if s.closed {
+ return 0, errors.New("read from closed reader")
+ }
+
+ n, err = s.ReadCloser.Read(p)
+
+ // Detect 0-byte read on first attempt (indicates link may be expired but returned 200 OK)
+ if !s.firstRead && n == 0 && (err == io.EOF || err == io.ErrUnexpectedEOF) {
+ s.firstRead = true
+ log.Warnf("Detected 0-byte read on first attempt, attempting to refresh link...")
+
+ // Try to refresh the link
+ s.refresher.mu.Lock()
+ refreshErr := s.refresher.doRefreshLocked(s.ctx)
+ s.refresher.mu.Unlock()
+
+ if refreshErr != nil {
+ log.Errorf("Failed to refresh link after 0-byte read: %v", refreshErr)
+ return n, err
+ }
+
+ // Close old connection
+ s.ReadCloser.Close()
+
+ // Get new reader and retry
+ s.refresher.mu.Lock()
+ reader, getErr := s.refresher.getInnerReader()
+ s.refresher.mu.Unlock()
+
+ if getErr != nil {
+ log.Errorf("Failed to get inner reader after refresh: %v", getErr)
+ return n, err
+ }
+
+ newRc, rangeErr := reader.RangeRead(s.ctx, s.httpRange)
+ if rangeErr != nil {
+ log.Errorf("Failed to create new range reader after refresh: %v", rangeErr)
+ return n, err
+ }
+
+ s.ReadCloser = newRc
+ log.Infof("Successfully refreshed link and reconnected after 0-byte read")
+
+ // Retry read with new connection
+ return s.ReadCloser.Read(p)
+ }
+
+ s.firstRead = true
+ return n, err
+}
+
+func (s *selfHealingReadCloser) Close() error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ if s.closed {
+ return nil
+ }
+ s.closed = true
+ return s.ReadCloser.Close()
+}
+
func GetRangeReaderFromLink(size int64, link *model.Link) (model.RangeReaderIF, error) {
+ // If link has a Refresher, wrap with RefreshableRangeReader for automatic refresh on expiry
+ if link.Refresher != nil {
+ return NewRefreshableRangeReader(link, size), nil
+ }
+
if link.RangeReader != nil {
if link.Concurrency < 1 && link.PartSize < 1 {
return link.RangeReader, nil
@@ -174,6 +411,113 @@ func CacheFullAndHash(stream model.FileStreamer, up *model.UpdateProgress, hashT
return tmpF, hex.EncodeToString(h.Sum(nil)), nil
}
+// ReadFullWithRangeRead 使用 RangeRead 从文件流中读取数据到 buf
+// file: 文件流
+// buf: 目标缓冲区
+// off: 读取的起始偏移量
+// 返回值: 实际读取的字节数和错误
+// 支持自动重试(最多 MAX_RANGE_READ_RETRY_COUNT 次),第 n 次失败后等待 n 秒再重试
+// 注意:链接刷新现在由 RefreshableRangeReader 内部的 selfHealingReadCloser 自动处理
+func ReadFullWithRangeRead(file model.FileStreamer, buf []byte, off int64) (int, error) {
+ length := int64(len(buf))
+ var lastErr error
+
+ // 重试最多 MAX_RANGE_READ_RETRY_COUNT 次
+ for retry := 0; retry < MAX_RANGE_READ_RETRY_COUNT; retry++ {
+ reader, err := file.RangeRead(http_range.Range{Start: off, Length: length})
+ if err != nil {
+ lastErr = fmt.Errorf("RangeRead failed at offset %d: %w", off, err)
+ log.Debugf("RangeRead retry %d failed: %v", retry+1, lastErr)
+ // 快速重试:1秒、2秒、3秒、4秒、5秒(连接失败快速重试)
+ time.Sleep(time.Duration(retry+1) * time.Second)
+ continue
+ }
+
+ n, err := io.ReadFull(reader, buf)
+ if closer, ok := reader.(io.Closer); ok {
+ closer.Close()
+ }
+
+ if err == nil {
+ return n, nil
+ }
+
+ lastErr = fmt.Errorf("failed to read all data via RangeRead at offset %d: (expect=%d, actual=%d) %w", off, length, n, err)
+ log.Debugf("RangeRead retry %d read failed: %v", retry+1, lastErr)
+
+ // 快速重试:1秒、2秒、3秒、4秒、5秒(读取失败快速重试)
+ // 注意:0字节读取导致的链接过期现在由 selfHealingReadCloser 自动处理
+ time.Sleep(time.Duration(retry+1) * time.Second)
+ }
+
+ return 0, lastErr
+}
+
+// StreamHashFile 流式计算文件哈希值,避免将整个文件加载到内存
+// file: 文件流
+// hashType: 哈希算法类型
+// progressWeight: 进度权重(0-100),用于计算整体进度
+// up: 进度回调函数
+func StreamHashFile(file model.FileStreamer, hashType *utils.HashType, progressWeight float64, up *model.UpdateProgress) (string, error) {
+ // 如果已经有完整缓存文件,直接使用
+ if cache := file.GetFile(); cache != nil {
+ hashFunc := hashType.NewFunc()
+ cache.Seek(0, io.SeekStart)
+ _, err := io.Copy(hashFunc, cache)
+ if err != nil {
+ return "", err
+ }
+ if up != nil && progressWeight > 0 {
+ (*up)(progressWeight)
+ }
+ return hex.EncodeToString(hashFunc.Sum(nil)), nil
+ }
+
+ hashFunc := hashType.NewFunc()
+ size := file.GetSize()
+ chunkSize := int64(10 * 1024 * 1024) // 10MB per chunk
+ buf := make([]byte, chunkSize)
+ var offset int64 = 0
+
+ for offset < size {
+ readSize := chunkSize
+ if size-offset < chunkSize {
+ readSize = size - offset
+ }
+
+ var n int
+ var err error
+
+ // 对于 SeekableStream,优先使用 RangeRead 避免消耗 Reader
+ // 这样后续发送时 Reader 还能正常工作
+ if _, ok := file.(*SeekableStream); ok {
+ n, err = ReadFullWithRangeRead(file, buf[:readSize], offset)
+ } else {
+ // 对于 FileStream,首先尝试顺序流读取(不消耗额外资源,适用于所有流类型)
+ n, err = io.ReadFull(file, buf[:readSize])
+ if err != nil {
+ // 顺序流读取失败,尝试使用 RangeRead 重试(适用于 SeekableStream)
+ log.Warnf("StreamHashFile: sequential read failed at offset %d, retrying with RangeRead: %v", offset, err)
+ n, err = ReadFullWithRangeRead(file, buf[:readSize], offset)
+ }
+ }
+
+ if err != nil {
+ return "", fmt.Errorf("calculate hash failed at offset %d: %w", offset, err)
+ }
+
+ hashFunc.Write(buf[:n])
+ offset += int64(n)
+
+ if up != nil && progressWeight > 0 {
+ progress := progressWeight * float64(offset) / float64(size)
+ (*up)(progress)
+ }
+ }
+
+ return hex.EncodeToString(hashFunc.Sum(nil)), nil
+}
+
type StreamSectionReaderIF interface {
// 线程不安全
GetSectionReader(off, length int64) (io.ReadSeeker, error)
@@ -188,37 +532,9 @@ func NewStreamSectionReader(file model.FileStreamer, maxBufferSize int, up *mode
}
maxBufferSize = min(maxBufferSize, int(file.GetSize()))
- if maxBufferSize > conf.MaxBufferLimit {
- f, err := os.CreateTemp(conf.Conf.TempDir, "file-*")
- if err != nil {
- return nil, err
- }
-
- if f.Truncate(file.GetSize()) != nil {
- // fallback to full cache
- _, _ = f.Close(), os.Remove(f.Name())
- cache, err := file.CacheFullAndWriter(up, nil)
- if err != nil {
- return nil, err
- }
- return &cachedSectionReader{cache}, nil
- }
-
- ss := &fileSectionReader{file: file, temp: f}
- ss.bufPool = &pool.Pool[*offsetWriterWithBase]{
- New: func() *offsetWriterWithBase {
- base := ss.tempOffset
- ss.tempOffset += int64(maxBufferSize)
- return &offsetWriterWithBase{io.NewOffsetWriter(ss.temp, base), base}
- },
- }
- file.Add(utils.CloseFunc(func() error {
- ss.bufPool.Reset()
- return errors.Join(ss.temp.Close(), os.Remove(ss.temp.Name()))
- }))
- return ss, nil
- }
+ // 始终使用 directSectionReader,只在内存中缓存当前分片
+ // 避免创建临时文件导致中间文件增长到整个文件大小
ss := &directSectionReader{file: file}
if conf.MmapThreshold > 0 && maxBufferSize >= conf.MmapThreshold {
ss.bufPool = &pool.Pool[[]byte]{
@@ -321,8 +637,16 @@ type directSectionReader struct {
bufPool *pool.Pool[[]byte]
}
-// 线程不安全
+// 线程不安全(依赖调用方保证串行调用)
+// 对于 SeekableStream:直接跳过(无需实际读取)
+// 对于 FileStream:必须顺序读取并丢弃
func (ss *directSectionReader) DiscardSection(off int64, length int64) error {
+ // 对于 SeekableStream,直接跳过(RangeRead 支持随机访问,不需要实际读取)
+ if _, ok := ss.file.(*SeekableStream); ok {
+ return nil
+ }
+
+ // 对于 FileStream,必须顺序读取并丢弃
if off != ss.fileOffset {
return fmt.Errorf("stream not cached: request offset %d != current offset %d", off, ss.fileOffset)
}
@@ -339,19 +663,37 @@ type bufferSectionReader struct {
buf []byte
}
-// 线程不安全
+// 线程不安全(依赖调用方保证串行调用)
+// 对于 SeekableStream:使用 RangeRead,支持随机访问(续传场景可跳过已上传分片)
+// 对于 FileStream:必须顺序读取
func (ss *directSectionReader) GetSectionReader(off, length int64) (io.ReadSeeker, error) {
+ tempBuf := ss.bufPool.Get()
+ buf := tempBuf[:length]
+
+ // 对于 SeekableStream,直接使用 RangeRead(支持随机访问,适用于续传场景)
+ if _, ok := ss.file.(*SeekableStream); ok {
+ n, err := ReadFullWithRangeRead(ss.file, buf, off)
+ if err != nil {
+ ss.bufPool.Put(tempBuf)
+ return nil, fmt.Errorf("RangeRead failed at offset %d: (expect=%d, actual=%d) %w", off, length, n, err)
+ }
+ return &bufferSectionReader{bytes.NewReader(buf), tempBuf}, nil
+ }
+
+ // 对于 FileStream,必须顺序读取
if off != ss.fileOffset {
+ ss.bufPool.Put(tempBuf)
return nil, fmt.Errorf("stream not cached: request offset %d != current offset %d", off, ss.fileOffset)
}
- tempBuf := ss.bufPool.Get()
- buf := tempBuf[:length]
+
n, err := io.ReadFull(ss.file, buf)
- ss.fileOffset += int64(n)
- if int64(n) != length {
- return nil, fmt.Errorf("failed to read all data: (expect =%d, actual =%d) %w", length, n, err)
+ if err != nil {
+ ss.bufPool.Put(tempBuf)
+ return nil, fmt.Errorf("sequential read failed at offset %d: (expect=%d, actual=%d) %w", off, length, n, err)
}
- return &bufferSectionReader{bytes.NewReader(buf), buf}, nil
+
+ ss.fileOffset = off + int64(n)
+ return &bufferSectionReader{bytes.NewReader(buf), tempBuf}, nil
}
func (ss *directSectionReader) FreeSectionReader(rs io.ReadSeeker) {
if sr, ok := rs.(*bufferSectionReader); ok {
diff --git a/pkg/utils/hash.go b/pkg/utils/hash.go
index 596e61e54..c4b4e735f 100644
--- a/pkg/utils/hash.go
+++ b/pkg/utils/hash.go
@@ -90,6 +90,12 @@ var (
// SHA256 indicates SHA-256 support
SHA256 = RegisterHash("sha256", "SHA-256", 64, sha256.New)
+
+ // SHA1_128K is SHA1 of first 128KB, used by 115 driver for rapid upload
+ SHA1_128K = RegisterHash("sha1_128k", "SHA1-128K", 40, sha1.New)
+
+ // PRE_HASH is SHA1 of first 1024 bytes, used by Aliyundrive for rapid upload
+ PRE_HASH = RegisterHash("pre_hash", "PRE-HASH", 40, sha1.New)
)
// HashData get hash of one hashType
diff --git a/server/handles/archive.go b/server/handles/archive.go
index 56418de26..4fd405688 100644
--- a/server/handles/archive.go
+++ b/server/handles/archive.go
@@ -231,7 +231,7 @@ func FsArchiveList(c *gin.Context, req *ArchiveListReq, user *model.User) {
type ArchiveDecompressReq struct {
SrcDir string `json:"src_dir" form:"src_dir"`
DstDir string `json:"dst_dir" form:"dst_dir"`
- Name []string `json:"name" form:"name"`
+ Names []string `json:"name" form:"name"`
ArchivePass string `json:"archive_pass" form:"archive_pass"`
InnerPath string `json:"inner_path" form:"inner_path"`
CacheFull bool `json:"cache_full" form:"cache_full"`
@@ -250,8 +250,8 @@ func FsArchiveDecompress(c *gin.Context) {
common.ErrorResp(c, errs.PermissionDenied, 403)
return
}
- srcPaths := make([]string, 0, len(req.Name))
- for _, name := range req.Name {
+ srcPaths := make([]string, 0, len(req.Names))
+ for _, name := range req.Names {
srcPath, err := user.JoinPath(stdpath.Join(req.SrcDir, name))
if err != nil {
common.ErrorResp(c, err, 403)
diff --git a/server/handles/fsmanage.go b/server/handles/fsmanage.go
index 8247fa8cb..62382a27c 100644
--- a/server/handles/fsmanage.go
+++ b/server/handles/fsmanage.go
@@ -6,18 +6,18 @@ import (
"strings"
"github.com/OpenListTeam/OpenList/v4/internal/conf"
- "github.com/OpenListTeam/OpenList/v4/internal/task"
-
"github.com/OpenListTeam/OpenList/v4/internal/errs"
"github.com/OpenListTeam/OpenList/v4/internal/fs"
"github.com/OpenListTeam/OpenList/v4/internal/model"
"github.com/OpenListTeam/OpenList/v4/internal/op"
"github.com/OpenListTeam/OpenList/v4/internal/sign"
+ "github.com/OpenListTeam/OpenList/v4/internal/task"
"github.com/OpenListTeam/OpenList/v4/pkg/generic"
"github.com/OpenListTeam/OpenList/v4/pkg/utils"
"github.com/OpenListTeam/OpenList/v4/server/common"
"github.com/gin-gonic/gin"
"github.com/pkg/errors"
+ log "github.com/sirupsen/logrus"
)
type MkdirOrLinkReq struct {
@@ -80,36 +80,44 @@ func FsMove(c *gin.Context) {
common.ErrorResp(c, errs.PermissionDenied, 403)
return
}
- srcDir, err := user.JoinPath(req.SrcDir)
- if err != nil {
- common.ErrorResp(c, err, 403)
- return
- }
dstDir, err := user.JoinPath(req.DstDir)
if err != nil {
common.ErrorResp(c, err, 403)
return
}
- var validNames []string
- if !req.Overwrite {
- for _, name := range req.Names {
- if res, _ := fs.Get(c.Request.Context(), stdpath.Join(dstDir, name), &fs.GetArgs{NoLog: true}); res != nil && !req.SkipExisting {
- common.ErrorStrResp(c, fmt.Sprintf("file [%s] exists", name), 403)
+ validPaths := make([]string, 0, len(req.Names))
+ for _, name := range req.Names {
+ // ensure req.Names is not a relative path
+ srcPath := stdpath.Join(req.SrcDir, name)
+ srcPath, err = user.JoinPath(srcPath)
+ if err != nil {
+ common.ErrorResp(c, err, 403)
+ return
+ }
+ if !req.Overwrite {
+ base := stdpath.Base(srcPath)
+ if base == "." || base == "/" {
+ common.ErrorStrResp(c, fmt.Sprintf("invalid file name [%s]", name), 400)
return
- } else if res == nil {
- validNames = append(validNames, name)
+ }
+ if res, _ := fs.Get(c.Request.Context(), stdpath.Join(dstDir, base), &fs.GetArgs{NoLog: true}); res != nil {
+ if !req.SkipExisting {
+ common.ErrorStrResp(c, fmt.Sprintf("file [%s] exists", name), 403)
+ return
+ } else {
+ continue
+ }
}
}
- } else {
- validNames = req.Names
+ validPaths = append(validPaths, srcPath)
}
// Create all tasks immediately without any synchronous validation
// All validation will be done asynchronously in the background
var addedTasks []task.TaskExtensionInfo
- for i, name := range validNames {
- t, err := fs.Move(c.Request.Context(), stdpath.Join(srcDir, name), dstDir, len(validNames) > i+1)
+ for i, p := range validPaths {
+ t, err := fs.Move(c.Request.Context(), p, dstDir, len(validPaths) > i+1)
if t != nil {
addedTasks = append(addedTasks, t)
}
@@ -147,44 +155,48 @@ func FsCopy(c *gin.Context) {
common.ErrorResp(c, errs.PermissionDenied, 403)
return
}
- srcDir, err := user.JoinPath(req.SrcDir)
- if err != nil {
- common.ErrorResp(c, err, 403)
- return
- }
dstDir, err := user.JoinPath(req.DstDir)
if err != nil {
common.ErrorResp(c, err, 403)
return
}
- var validNames []string
- if !req.Overwrite {
- for _, name := range req.Names {
- if res, _ := fs.Get(c.Request.Context(), stdpath.Join(dstDir, name), &fs.GetArgs{NoLog: true}); res != nil {
+ validPaths := make([]string, 0, len(req.Names))
+ for _, name := range req.Names {
+ // ensure req.Names is not a relative path
+ srcPath := stdpath.Join(req.SrcDir, name)
+ srcPath, err = user.JoinPath(srcPath)
+ if err != nil {
+ common.ErrorResp(c, err, 403)
+ return
+ }
+ if !req.Overwrite {
+ base := stdpath.Base(srcPath)
+ if base == "." || base == "/" {
+ common.ErrorStrResp(c, fmt.Sprintf("invalid file name [%s]", name), 400)
+ return
+ }
+ if res, _ := fs.Get(c.Request.Context(), stdpath.Join(dstDir, base), &fs.GetArgs{NoLog: true}); res != nil {
if !req.SkipExisting && !req.Merge {
common.ErrorStrResp(c, fmt.Sprintf("file [%s] exists", name), 403)
return
- } else if req.Merge && res.IsDir() {
- validNames = append(validNames, name)
+ } else if !req.Merge || !res.IsDir() {
+ continue
}
- } else {
- validNames = append(validNames, name)
}
}
- } else {
- validNames = req.Names
+ validPaths = append(validPaths, srcPath)
}
// Create all tasks immediately without any synchronous validation
// All validation will be done asynchronously in the background
var addedTasks []task.TaskExtensionInfo
- for i, name := range validNames {
+ for i, p := range validPaths {
var t task.TaskExtensionInfo
if req.Merge {
- t, err = fs.Merge(c.Request.Context(), stdpath.Join(srcDir, name), dstDir, len(validNames) > i+1)
+ t, err = fs.Merge(c.Request.Context(), p, dstDir, len(validPaths) > i+1)
} else {
- t, err = fs.Copy(c.Request.Context(), stdpath.Join(srcDir, name), dstDir, len(validNames) > i+1)
+ t, err = fs.Copy(c.Request.Context(), p, dstDir, len(validPaths) > i+1)
}
if t != nil {
addedTasks = append(addedTasks, t)
@@ -276,13 +288,25 @@ func FsRemove(c *gin.Context) {
common.ErrorResp(c, errs.PermissionDenied, 403)
return
}
- reqDir, err := user.JoinPath(req.Dir)
- if err != nil {
- common.ErrorResp(c, err, 403)
- return
+ for i, name := range req.Names {
+ if strings.TrimSpace(utils.FixAndCleanPath(name)) == "/" {
+ log.Warnf("FsRemove: invalid item skipped: %s (parent directory: %s)\n", name, req.Dir)
+ req.Names[i] = ""
+ continue
+ }
+ // ensure req.Names is not a relative path
+ var err error
+ req.Names[i], err = user.JoinPath(stdpath.Join(req.Dir, name))
+ if err != nil {
+ common.ErrorResp(c, err, 403)
+ return
+ }
}
- for _, name := range req.Names {
- err := fs.Remove(c.Request.Context(), stdpath.Join(reqDir, name))
+ for _, path := range req.Names {
+ if path == "" {
+ continue
+ }
+ err := fs.Remove(c.Request.Context(), path)
if err != nil {
common.ErrorResp(c, err, 500)
return
diff --git a/server/handles/fsread.go b/server/handles/fsread.go
index 886da9dc9..995f84423 100644
--- a/server/handles/fsread.go
+++ b/server/handles/fsread.go
@@ -231,6 +231,10 @@ func toObjsResp(objs []model.Obj, parent string, encrypt bool) []ObjResp {
for _, obj := range objs {
thumb, _ := model.GetThumb(obj)
mountDetails, _ := model.GetStorageDetails(obj)
+ hashInfo := obj.GetHash().Export()
+ if hashInfo == nil {
+ hashInfo = make(map[*utils.HashType]string)
+ }
resp = append(resp, ObjResp{
Name: obj.GetName(),
Size: obj.GetSize(),
@@ -238,7 +242,7 @@ func toObjsResp(objs []model.Obj, parent string, encrypt bool) []ObjResp {
Modified: obj.ModTime(),
Created: obj.CreateTime(),
HashInfoStr: obj.GetHash().String(),
- HashInfo: obj.GetHash().Export(),
+ HashInfo: hashInfo,
Sign: common.Sign(obj, parent, encrypt),
Thumb: thumb,
Type: utils.GetObjType(obj.GetName(), obj.IsDir()),
diff --git a/server/handles/fsup.go b/server/handles/fsup.go
index 0f46398cd..54cdb4fee 100644
--- a/server/handles/fsup.go
+++ b/server/handles/fsup.go
@@ -93,6 +93,12 @@ func FsStream(c *gin.Context) {
if sha256 := c.GetHeader("X-File-Sha256"); sha256 != "" {
h[utils.SHA256] = sha256
}
+ if sha1_128k := c.GetHeader("X-File-Sha1-128k"); sha1_128k != "" {
+ h[utils.SHA1_128K] = sha1_128k
+ }
+ if preHash := c.GetHeader("X-File-Pre-Hash"); preHash != "" {
+ h[utils.PRE_HASH] = preHash
+ }
mimetype := c.GetHeader("Content-Type")
if len(mimetype) == 0 {
mimetype = utils.GetMimeType(name)
@@ -190,6 +196,12 @@ func FsForm(c *gin.Context) {
if sha256 := c.GetHeader("X-File-Sha256"); sha256 != "" {
h[utils.SHA256] = sha256
}
+ if sha1_128k := c.GetHeader("X-File-Sha1-128k"); sha1_128k != "" {
+ h[utils.SHA1_128K] = sha1_128k
+ }
+ if preHash := c.GetHeader("X-File-Pre-Hash"); preHash != "" {
+ h[utils.PRE_HASH] = preHash
+ }
mimetype := file.Header.Get("Content-Type")
if len(mimetype) == 0 {
mimetype = utils.GetMimeType(name)