diff --git a/integrations/api_repo_test.go b/integrations/api_repo_test.go index 57fe65f4bf90..14f512d48605 100644 --- a/integrations/api_repo_test.go +++ b/integrations/api_repo_test.go @@ -337,10 +337,7 @@ func TestAPIRepoMigrate(t *testing.T) { cloneURL, repoName string expectedStatus int }{ - {ctxUserID: 1, userID: 2, cloneURL: "https://github.com/go-gitea/test_repo.git", repoName: "git-admin", expectedStatus: http.StatusCreated}, - {ctxUserID: 2, userID: 2, cloneURL: "https://github.com/go-gitea/test_repo.git", repoName: "git-own", expectedStatus: http.StatusCreated}, {ctxUserID: 2, userID: 1, cloneURL: "https://github.com/go-gitea/test_repo.git", repoName: "git-bad", expectedStatus: http.StatusForbidden}, - {ctxUserID: 2, userID: 3, cloneURL: "https://github.com/go-gitea/test_repo.git", repoName: "git-org", expectedStatus: http.StatusCreated}, {ctxUserID: 2, userID: 6, cloneURL: "https://github.com/go-gitea/test_repo.git", repoName: "git-bad-org", expectedStatus: http.StatusForbidden}, {ctxUserID: 2, userID: 3, cloneURL: "https://localhost:3000/user/test_repo.git", repoName: "private-ip", expectedStatus: http.StatusUnprocessableEntity}, {ctxUserID: 2, userID: 3, cloneURL: "https://10.0.0.1/user/test_repo.git", repoName: "private-ip", expectedStatus: http.StatusUnprocessableEntity}, diff --git a/integrations/dump_restore_test.go b/integrations/dump_restore_test.go index ef869c4ddabc..aa347feec9a9 100644 --- a/integrations/dump_restore_test.go +++ b/integrations/dump_restore_test.go @@ -12,20 +12,21 @@ import ( "os" "path/filepath" "reflect" - "strings" "testing" repo_model "code.gitea.io/gitea/models/repo" "code.gitea.io/gitea/models/unittest" user_model "code.gitea.io/gitea/models/user" + "code.gitea.io/gitea/modules/json" base "code.gitea.io/gitea/modules/migration" "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/structs" - "code.gitea.io/gitea/modules/util" "code.gitea.io/gitea/services/migrations" "github.com/stretchr/testify/assert" - 
"gopkg.in/yaml.v2" + "lab.forgefriends.org/friendlyforgeformat/gofff" + "lab.forgefriends.org/friendlyforgeformat/gofff/forges/file" + "lab.forgefriends.org/friendlyforgeformat/gofff/format" ) func TestDumpRestore(t *testing.T) { @@ -40,83 +41,91 @@ func TestDumpRestore(t *testing.T) { setting.AppVer = AppVer }() - assert.NoError(t, migrations.Init()) - - reponame := "repo1" - - basePath, err := os.MkdirTemp("", reponame) - assert.NoError(t, err) - defer util.RemoveAll(basePath) - - repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{Name: reponame}).(*repo_model.Repository) - repoOwner := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: repo.OwnerID}).(*user_model.User) + repoOwner := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1}).(*user_model.User) session := loginUser(t, repoOwner.Name) token := getTokenForLoggedInUser(t, session) - // - // Phase 1: dump repo1 from the Gitea instance to the filesystem - // + fixture := file.NewFixture(t, gofff.AllFeatures) + fixture.CreateEverything(file.User{ + ID: repoOwner.ID, + Name: repoOwner.Name, + Email: repoOwner.Email, + }) - ctx := context.Background() - opts := migrations.MigrateOptions{ - GitServiceType: structs.GiteaService, - Issues: true, - PullRequests: true, - Labels: true, - Milestones: true, - Comments: true, - AuthToken: token, - CloneAddr: repo.CloneLink().HTTPS, - RepoName: reponame, - } - err = migrations.DumpRepository(ctx, basePath, repoOwner.Name, opts) - assert.NoError(t, err) - - // - // Verify desired side effects of the dump - // - d := filepath.Join(basePath, repo.OwnerName, repo.Name) - for _, f := range []string{"repo.yml", "topic.yml", "label.yml", "milestone.yml", "issue.yml"} { - assert.FileExists(t, filepath.Join(d, f)) - } + assert.NoError(t, migrations.Init()) + ctx := context.Background() // - // Phase 2: restore from the filesystem to the Gitea instance in restoredrepo + // Phase 1: restore from the filesystem to the Gitea instance in restoredrepo // 
- newreponame := "restored" - err = migrations.RestoreRepository(ctx, d, repo.OwnerName, newreponame, []string{ - "labels", "issues", "comments", "milestones", "pull_requests", + restoredRepoName := "restored" + restoredRepoDirectory := fixture.GetDirectory() + err := migrations.RestoreRepository(ctx, restoredRepoDirectory, repoOwner.Name, restoredRepoName, []string{ + "issues", "milestones", "labels", "releases", "release_assets", "comments", "pull_requests", + // wiki", }, false) assert.NoError(t, err) - newrepo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{Name: newreponame}).(*repo_model.Repository) + restoredRepo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{Name: restoredRepoName}).(*repo_model.Repository) + unittest.AssertExistsAndLoadBean(t, &repo_model.Attachment{Name: file.Asset1}) // - // Phase 3: dump restored from the Gitea instance to the filesystem + // Phase 2: dump restoredRepo from the Gitea instance to the filesystem // - opts.RepoName = newreponame - opts.CloneAddr = newrepo.CloneLink().HTTPS - err = migrations.DumpRepository(ctx, basePath, repoOwner.Name, opts) + + opts := base.MigrateOptions{ + GitServiceType: structs.GiteaService, + + Wiki: true, + Issues: true, + Milestones: true, + Labels: true, + Releases: true, + Comments: true, + PullRequests: true, + ReleaseAssets: true, + + AuthToken: token, + CloneAddr: restoredRepo.CloneLink().HTTPS, + RepoName: restoredRepoName, + } + dumpedRepoDirectory := t.TempDir() + err = migrations.DumpRepository(ctx, dumpedRepoDirectory, repoOwner.Name, opts) assert.NoError(t, err) // // Verify the dump of restored is the same as the dump of repo1 // + //fixture.AssertEquals(restoredRepoDirectory, dumpedRepoDirectory) + // + // Verify the fixture files are the same as the restored files + // + project := fixture.GetFile().GetProject() comparator := &compareDump{ - t: t, - basePath: basePath, + t: t, + + repoBefore: project.Name, + ownerBefore: project.Owner, + dirBefore: 
restoredRepoDirectory, + + repoAfter: restoredRepoName, + ownerAfter: repoOwner.Name, + dirAfter: dumpedRepoDirectory, } - comparator.assertEquals(repo, newrepo) + comparator.assertEquals() }) } type compareDump struct { - t *testing.T - basePath string - repoBefore *repo_model.Repository - dirBefore string - repoAfter *repo_model.Repository + t *testing.T + + repoBefore string + ownerBefore string + dirBefore string + + repoAfter string + ownerAfter string dirAfter string } @@ -130,57 +139,58 @@ type compareField struct { type compareFields map[string]compareField -func (c *compareDump) replaceRepoName(original string) string { - return strings.ReplaceAll(original, c.repoBefore.Name, c.repoAfter.Name) -} - -func (c *compareDump) assertEquals(repoBefore, repoAfter *repo_model.Repository) { - c.repoBefore = repoBefore - c.dirBefore = filepath.Join(c.basePath, repoBefore.OwnerName, repoBefore.Name) - c.repoAfter = repoAfter - c.dirAfter = filepath.Join(c.basePath, repoAfter.OwnerName, repoAfter.Name) - +func (c *compareDump) assertEquals() { // // base.Repository // - _ = c.assertEqual("repo.yml", base.Repository{}, compareFields{ + _ = c.assertEqual("project.json", format.Project{}, compareFields{ "Name": { - before: c.repoBefore.Name, - after: c.repoAfter.Name, + before: c.repoBefore, + after: c.repoAfter, + }, + "Owner": { + before: c.ownerBefore, + after: c.ownerAfter, }, - "CloneURL": {transform: c.replaceRepoName}, - "OriginalURL": {transform: c.replaceRepoName}, + "Index": {ignore: true}, + "CloneURL": {ignore: true}, }) // // base.Label // - labels, ok := c.assertEqual("label.yml", []base.Label{}, compareFields{}).([]*base.Label) + compareLabels := compareFields{ + "Index": {ignore: true}, + } + labels, ok := c.assertEqual("label.json", []format.Label{}, compareLabels).([]*format.Label) assert.True(c.t, ok) assert.GreaterOrEqual(c.t, len(labels), 1) // // base.Milestone // - milestones, ok := c.assertEqual("milestone.yml", []base.Milestone{}, compareFields{ + 
milestones, ok := c.assertEqual("milestone.json", []format.Milestone{}, compareFields{ + "Index": {ignore: true}, "Updated": {ignore: true}, // the database updates that field independently - }).([]*base.Milestone) + }).([]*format.Milestone) assert.True(c.t, ok) assert.GreaterOrEqual(c.t, len(milestones), 1) // - // base.Issue and the associated comments + // format.Issue and the associated comments // - issues, ok := c.assertEqual("issue.yml", []base.Issue{}, compareFields{ + issues, ok := c.assertEqual("issue.json", []format.Issue{}, compareFields{ + "Index": {ignore: true}, "Assignees": {ignore: true}, // not implemented yet - }).([]*base.Issue) + "Labels": {nested: &compareLabels}, + }).([]*format.Issue) assert.True(c.t, ok) assert.GreaterOrEqual(c.t, len(issues), 1) for _, issue := range issues { - filename := filepath.Join("comments", fmt.Sprintf("%d.yml", issue.Number)) - comments, ok := c.assertEqual(filename, []base.Comment{}, compareFields{ + filename := filepath.Join("comments", fmt.Sprintf("%d.json", issue.Number)) + comments, ok := c.assertEqual(filename, []format.Comment{}, compareFields{ "Index": {ignore: true}, - }).([]*base.Comment) + }).([]*format.Comment) assert.True(c.t, ok) for _, comment := range comments { assert.EqualValues(c.t, issue.Number, comment.IssueIndex) @@ -188,26 +198,32 @@ func (c *compareDump) assertEquals(repoBefore, repoAfter *repo_model.Repository) } // - // base.PullRequest and the associated comments + // format.PullRequest and the associated comments // comparePullRequestBranch := &compareFields{ "RepoName": { - before: c.repoBefore.Name, - after: c.repoAfter.Name, + before: c.repoBefore, + after: c.repoAfter, + }, + "OwnerName": { + before: c.ownerBefore, + after: c.ownerAfter, }, - "CloneURL": {transform: c.replaceRepoName}, + "CloneURL": {ignore: true}, } - prs, ok := c.assertEqual("pull_request.yml", []base.PullRequest{}, compareFields{ + prs, ok := c.assertEqual("pull_request.json", []format.PullRequest{}, 
compareFields{ "Assignees": {ignore: true}, // not implemented yet "Head": {nested: comparePullRequestBranch}, "Base": {nested: comparePullRequestBranch}, + "PatchURL": {ignore: true}, + "CloneURL": {ignore: true}, "Labels": {ignore: true}, // because org labels are not handled properly - }).([]*base.PullRequest) + }).([]*format.PullRequest) assert.True(c.t, ok) assert.GreaterOrEqual(c.t, len(prs), 1) for _, pr := range prs { - filename := filepath.Join("comments", fmt.Sprintf("%d.yml", pr.Number)) - comments, ok := c.assertEqual(filename, []base.Comment{}, compareFields{}).([]*base.Comment) + filename := filepath.Join("comments", fmt.Sprintf("%d.json", pr.Number)) + comments, ok := c.assertEqual(filename, []format.Comment{}, compareFields{}).([]*format.Comment) assert.True(c.t, ok) for _, comment := range comments { assert.EqualValues(c.t, pr.Number, comment.IssueIndex) @@ -215,7 +231,7 @@ func (c *compareDump) assertEquals(repoBefore, repoAfter *repo_model.Repository) } } -func (c *compareDump) assertLoadYAMLFiles(beforeFilename, afterFilename string, before, after interface{}) { +func (c *compareDump) assertLoadJSONFiles(beforeFilename, afterFilename string, before, after interface{}) { _, beforeErr := os.Stat(beforeFilename) _, afterErr := os.Stat(afterFilename) assert.EqualValues(c.t, errors.Is(beforeErr, os.ErrNotExist), errors.Is(afterErr, os.ErrNotExist)) @@ -225,10 +241,10 @@ func (c *compareDump) assertLoadYAMLFiles(beforeFilename, afterFilename string, beforeBytes, err := os.ReadFile(beforeFilename) assert.NoError(c.t, err) - assert.NoError(c.t, yaml.Unmarshal(beforeBytes, before)) + assert.NoError(c.t, json.Unmarshal(beforeBytes, before)) afterBytes, err := os.ReadFile(afterFilename) assert.NoError(c.t, err) - assert.NoError(c.t, yaml.Unmarshal(afterBytes, after)) + assert.NoError(c.t, json.Unmarshal(afterBytes, after)) } func (c *compareDump) assertLoadFiles(beforeFilename, afterFilename string, t reflect.Type) (before, after reflect.Value) { @@ 
-251,13 +267,14 @@ func (c *compareDump) assertLoadFiles(beforeFilename, afterFilename string, t re beforePtr = reflect.New(t) afterPtr = reflect.New(t) } - c.assertLoadYAMLFiles(beforeFilename, afterFilename, beforePtr.Interface(), afterPtr.Interface()) + c.assertLoadJSONFiles(beforeFilename, afterFilename, beforePtr.Interface(), afterPtr.Interface()) return beforePtr.Elem(), afterPtr.Elem() } func (c *compareDump) assertEqual(filename string, kind interface{}, fields compareFields) (i interface{}) { beforeFilename := filepath.Join(c.dirBefore, filename) afterFilename := filepath.Join(c.dirAfter, filename) + fmt.Println("assertEqual ", beforeFilename, afterFilename) typeOf := reflect.TypeOf(kind) before, after := c.assertLoadFiles(beforeFilename, afterFilename, typeOf) @@ -300,29 +317,34 @@ func (c *compareDump) assertEqualValues(before, after reflect.Value, fields comp // Transform these strings before comparing them // bs, ok := bi.(string) - assert.True(c.t, ok) + assert.True(c.t, ok, field.Name) as, ok := ai.(string) - assert.True(c.t, ok) - assert.EqualValues(c.t, compare.transform(bs), compare.transform(as)) + assert.True(c.t, ok, field.Name) + assert.EqualValues(c.t, compare.transform(bs), compare.transform(as), field.Name) continue } if compare.before != nil && compare.after != nil { // // The fields are expected to have different values // - assert.EqualValues(c.t, compare.before, bi) - assert.EqualValues(c.t, compare.after, ai) + assert.EqualValues(c.t, compare.before, bi, field.Name) + assert.EqualValues(c.t, compare.after, ai, field.Name) continue } if compare.nested != nil { // - // The fields are a struct, recurse + // The fields are a struct/slice, recurse // - c.assertEqualValues(bf, af, *compare.nested) + fmt.Println("nested ", field.Name) + if reflect.TypeOf(bi).Kind() == reflect.Slice { + c.assertEqualSlices(bf, af, *compare.nested) + } else { + c.assertEqualValues(bf, af, *compare.nested) + } continue } } - assert.EqualValues(c.t, bi, ai) + 
assert.EqualValues(c.t, bi, ai, field.Name) } return after.Interface() } diff --git a/integrations/mirror_pull_test.go b/integrations/mirror_pull_test.go index 8f74d5fe16d6..9afaceb51b5a 100644 --- a/integrations/mirror_pull_test.go +++ b/integrations/mirror_pull_test.go @@ -50,7 +50,14 @@ func TestMirrorPull(t *testing.T) { ctx := context.Background() - mirror, err := repository.MigrateRepositoryGitData(ctx, user, mirrorRepo, opts, nil) + fetch := func(repoPath string) { + assert.NoError(t, git.Clone(ctx, opts.CloneAddr, repoPath, git.CloneRepoOptions{ + Mirror: true, + Quiet: true, + SkipTLSVerify: true, + })) + } + mirror, err := repository.MigrateRepositoryGitData(ctx, user, fetch, mirrorRepo, opts, nil) assert.NoError(t, err) gitRepo, err := git.OpenRepository(git.DefaultContext, repoPath) diff --git a/integrations/restore_repo_test.go b/integrations/restore_repo_test.go new file mode 100644 index 000000000000..9e9c99cf3c23 --- /dev/null +++ b/integrations/restore_repo_test.go @@ -0,0 +1,44 @@ +// Copyright 2022 The Gitea Authors. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE file. 
+ +package integrations + +import ( + "context" + "net/http" + "net/url" + "testing" + + repo_model "code.gitea.io/gitea/models/repo" + "code.gitea.io/gitea/models/unittest" + user_model "code.gitea.io/gitea/models/user" + "code.gitea.io/gitea/modules/private" + + "github.com/stretchr/testify/assert" + "lab.forgefriends.org/friendlyforgeformat/gofff" + "lab.forgefriends.org/friendlyforgeformat/gofff/forges/file" +) + +func TestAPIPrivateRestoreRepo(t *testing.T) { + onGiteaRun(t, func(*testing.T, *url.URL) { + fixture := file.NewFixture(t, gofff.AllFeatures) + fixture.CreateEverything(file.User1) + + repoOwner := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1}).(*user_model.User) + + repoName := "restoredrepo" + validation := true + statusCode, errStr := private.RestoreRepo( + context.Background(), + fixture.GetDirectory(), + repoOwner.Name, + repoName, + []string{"issues"}, + validation, + ) + assert.EqualValues(t, http.StatusOK, statusCode, errStr) + + unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{Name: repoName}) + }) +} diff --git a/modules/migration/options.go b/modules/migration/options.go index 1e92a1b0b35f..34e26d778a4a 100644 --- a/modules/migration/options.go +++ b/modules/migration/options.go @@ -5,7 +5,11 @@ package migration -import "code.gitea.io/gitea/modules/structs" +import ( + "code.gitea.io/gitea/modules/structs" + + "lab.forgefriends.org/friendlyforgeformat/gofff" +) // MigrateOptions defines the way a repository gets migrated // this is for internal usage by migrations module and func who interact with it @@ -40,3 +44,16 @@ type MigrateOptions struct { MigrateToRepoID int64 MirrorInterval string `json:"mirror_interval"` } + +func (m MigrateOptions) ToGofffFeatures() gofff.Features { + return gofff.Features{ + Wiki: m.Wiki, + Issues: m.Issues, + Milestones: m.Milestones, + Labels: m.Labels, + Releases: m.Releases, + Comments: m.Comments, + PullRequests: m.PullRequests, + ReleaseAssets: m.ReleaseAssets, + } +} diff --git 
a/modules/repository/repo.go b/modules/repository/repo.go index 436045146a99..476fc5cf9a57 100644 --- a/modules/repository/repo.go +++ b/modules/repository/repo.go @@ -51,7 +51,7 @@ func WikiRemoteURL(ctx context.Context, remote string) string { // MigrateRepositoryGitData starts migrating git related data after created migrating repository func MigrateRepositoryGitData(ctx context.Context, u *user_model.User, - repo *repo_model.Repository, opts migration.MigrateOptions, + fetch func(string), repo *repo_model.Repository, opts migration.MigrateOptions, httpTransport *http.Transport, ) (*repo_model.Repository, error) { repoPath := repo_model.RepoPath(u.Name, opts.RepoName) @@ -73,14 +73,7 @@ func MigrateRepositoryGitData(ctx context.Context, u *user_model.User, return repo, fmt.Errorf("Failed to remove %s: %v", repoPath, err) } - if err = git.Clone(ctx, opts.CloneAddr, repoPath, git.CloneRepoOptions{ - Mirror: true, - Quiet: true, - Timeout: migrateTimeout, - SkipTLSVerify: setting.Migrations.SkipTLSVerify, - }); err != nil { - return repo, fmt.Errorf("Clone: %v", err) - } + fetch(repoPath) if err := git.WriteCommitGraph(ctx, repoPath); err != nil { return repo, err @@ -226,6 +219,21 @@ func MigrateRepositoryGitData(ctx context.Context, u *user_model.User, return repo, committer.Commit() } +// MigrateRepositoryGitDataWiki starts migrating the wiki git related data after created migrating repository +func MigrateRepositoryGitDataWiki(ctx context.Context, u *user_model.User, + fetch func(string), repo *repo_model.Repository, opts migration.MigrateOptions, +) error { + wikiPath := repo_model.WikiPath(u.Name, opts.RepoName) + wikiRemotePath := WikiRemoteURL(ctx, opts.CloneAddr) + if len(wikiRemotePath) > 0 { + if err := util.RemoveAll(wikiPath); err != nil { + return fmt.Errorf("Failed to remove %s: %v", wikiPath, err) + } + fetch(wikiPath) + } + return git.WriteCommitGraph(ctx, wikiPath) +} + +// cleanUpMigrateGitConfig removes mirror info which prevents "push --all".
// This also removes possible user credentials. func cleanUpMigrateGitConfig(configPath string) error { diff --git a/routers/api/v1/repo/migrate.go b/routers/api/v1/repo/migrate.go index f868c53951a1..6a7617597480 100644 --- a/routers/api/v1/repo/migrate.go +++ b/routers/api/v1/repo/migrate.go @@ -22,7 +22,7 @@ import ( "code.gitea.io/gitea/modules/graceful" "code.gitea.io/gitea/modules/lfs" "code.gitea.io/gitea/modules/log" - base "code.gitea.io/gitea/modules/migration" + migration_module "code.gitea.io/gitea/modules/migration" "code.gitea.io/gitea/modules/notification" repo_module "code.gitea.io/gitea/modules/repository" "code.gitea.io/gitea/modules/setting" @@ -140,7 +140,7 @@ func Migrate(ctx *context.APIContext) { } } - opts := migrations.MigrateOptions{ + opts := migration_module.MigrateOptions{ CloneAddr: remoteAddr, RepoName: form.RepoName, Description: form.Description, @@ -221,10 +221,6 @@ func handleMigrateError(ctx *context.APIContext, repoOwner *user_model.User, rem ctx.Error(http.StatusConflict, "", "The repository with the same name already exists.") case repo_model.IsErrRepoFilesAlreadyExist(err): ctx.Error(http.StatusConflict, "", "Files already exist for this repository. 
Adopt them or delete them.") - case migrations.IsRateLimitError(err): - ctx.Error(http.StatusUnprocessableEntity, "", "Remote visit addressed rate limitation.") - case migrations.IsTwoFactorAuthError(err): - ctx.Error(http.StatusUnprocessableEntity, "", "Remote visit required two factors authentication.") case repo_model.IsErrReachLimitOfRepo(err): ctx.Error(http.StatusUnprocessableEntity, "", fmt.Sprintf("You have already reached your limit of %d repositories.", repoOwner.MaxCreationLimit())) case db.IsErrNameReserved(err): @@ -235,7 +231,7 @@ func handleMigrateError(ctx *context.APIContext, repoOwner *user_model.User, rem ctx.Error(http.StatusUnprocessableEntity, "", fmt.Sprintf("The pattern '%s' is not allowed in a username.", err.(db.ErrNamePatternNotAllowed).Pattern)) case models.IsErrInvalidCloneAddr(err): ctx.Error(http.StatusUnprocessableEntity, "", err) - case base.IsErrNotSupported(err): + case migration_module.IsErrNotSupported(err): ctx.Error(http.StatusUnprocessableEntity, "", err) default: err = util.SanitizeErrorCredentialURLs(err) diff --git a/routers/web/repo/migrate.go b/routers/web/repo/migrate.go index 393f8ed3d931..184f4aaa8078 100644 --- a/routers/web/repo/migrate.go +++ b/routers/web/repo/migrate.go @@ -18,6 +18,7 @@ import ( "code.gitea.io/gitea/modules/context" "code.gitea.io/gitea/modules/lfs" "code.gitea.io/gitea/modules/log" + migration_module "code.gitea.io/gitea/modules/migration" "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/structs" "code.gitea.io/gitea/modules/util" @@ -76,10 +77,6 @@ func handleMigrateError(ctx *context.Context, owner *user_model.User, err error, } switch { - case migrations.IsRateLimitError(err): - ctx.RenderWithErr(ctx.Tr("form.visit_rate_limit"), tpl, form) - case migrations.IsTwoFactorAuthError(err): - ctx.RenderWithErr(ctx.Tr("form.2fa_auth_required"), tpl, form) case repo_model.IsErrReachLimitOfRepo(err): maxCreationLimit := owner.MaxCreationLimit() msg := ctx.TrN(maxCreationLimit, 
"repo.form.reach_limit_of_creation_1", "repo.form.reach_limit_of_creation_n", maxCreationLimit) @@ -202,7 +199,7 @@ func MigratePost(ctx *context.Context) { } } - opts := migrations.MigrateOptions{ + opts := migration_module.MigrateOptions{ OriginalURL: form.CloneAddr, GitServiceType: form.Service, CloneAddr: remoteAddr, diff --git a/services/migrations/dump.go b/services/migrations/dump.go index a9ec459519e5..054c81ba9589 100644 --- a/services/migrations/dump.go +++ b/services/migrations/dump.go @@ -6,579 +6,94 @@ package migrations import ( "context" - "errors" - "fmt" - "io" - "net/http" - "net/url" "os" - "path" - "path/filepath" - "strconv" "strings" - "time" user_model "code.gitea.io/gitea/models/user" - "code.gitea.io/gitea/modules/git" - "code.gitea.io/gitea/modules/log" base "code.gitea.io/gitea/modules/migration" - "code.gitea.io/gitea/modules/repository" - "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/structs" - "gopkg.in/yaml.v2" + "lab.forgefriends.org/friendlyforgeformat/gofff" + gofff_domain "lab.forgefriends.org/friendlyforgeformat/gofff/domain" + gofff_forges "lab.forgefriends.org/friendlyforgeformat/gofff/forges" + gofff_file "lab.forgefriends.org/friendlyforgeformat/gofff/forges/file" ) -var _ base.Uploader = &RepositoryDumper{} - -// RepositoryDumper implements an Uploader to the local directory -type RepositoryDumper struct { - ctx context.Context - baseDir string - repoOwner string - repoName string - opts base.MigrateOptions - milestoneFile *os.File - labelFile *os.File - releaseFile *os.File - issueFile *os.File - commentFiles map[int64]*os.File - pullrequestFile *os.File - reviewFiles map[int64]*os.File - - gitRepo *git.Repository - prHeadCache map[string]struct{} -} - -// NewRepositoryDumper creates an gitea Uploader -func NewRepositoryDumper(ctx context.Context, baseDir, repoOwner, repoName string, opts base.MigrateOptions) (*RepositoryDumper, error) { - baseDir = filepath.Join(baseDir, repoOwner, repoName) - if err 
:= os.MkdirAll(baseDir, os.ModePerm); err != nil { - return nil, err - } - return &RepositoryDumper{ - ctx: ctx, - opts: opts, - baseDir: baseDir, - repoOwner: repoOwner, - repoName: repoName, - prHeadCache: make(map[string]struct{}), - commentFiles: make(map[int64]*os.File), - reviewFiles: make(map[int64]*os.File), - }, nil -} - -// MaxBatchInsertSize returns the table's max batch insert size -func (g *RepositoryDumper) MaxBatchInsertSize(tp string) int { - return 1000 -} - -func (g *RepositoryDumper) gitPath() string { - return filepath.Join(g.baseDir, "git") -} - -func (g *RepositoryDumper) wikiPath() string { - return filepath.Join(g.baseDir, "wiki") -} - -func (g *RepositoryDumper) commentDir() string { - return filepath.Join(g.baseDir, "comments") -} - -func (g *RepositoryDumper) reviewDir() string { - return filepath.Join(g.baseDir, "reviews") -} - -func (g *RepositoryDumper) setURLToken(remoteAddr string) (string, error) { - if len(g.opts.AuthToken) > 0 || len(g.opts.AuthUsername) > 0 { - u, err := url.Parse(remoteAddr) - if err != nil { - return "", err - } - u.User = url.UserPassword(g.opts.AuthUsername, g.opts.AuthPassword) - if len(g.opts.AuthToken) > 0 { - u.User = url.UserPassword("oauth2", g.opts.AuthToken) - } - remoteAddr = u.String() - } - - return remoteAddr, nil -} - -// CreateRepo creates a repository -func (g *RepositoryDumper) CreateRepo(repo *base.Repository, opts base.MigrateOptions) error { - f, err := os.Create(filepath.Join(g.baseDir, "repo.yml")) - if err != nil { - return err - } - defer f.Close() - - bs, err := yaml.Marshal(map[string]interface{}{ - "name": repo.Name, - "owner": repo.Owner, - "description": repo.Description, - "clone_addr": opts.CloneAddr, - "original_url": repo.OriginalURL, - "is_private": opts.Private, - "service_type": opts.GitServiceType, - "wiki": opts.Wiki, - "issues": opts.Issues, - "milestones": opts.Milestones, - "labels": opts.Labels, - "releases": opts.Releases, - "comments": opts.Comments, - "pulls": 
opts.PullRequests, - "assets": opts.ReleaseAssets, - }) - if err != nil { - return err - } - - if _, err := f.Write(bs); err != nil { - return err - } - - repoPath := g.gitPath() - if err := os.MkdirAll(repoPath, os.ModePerm); err != nil { - return err - } - - migrateTimeout := 2 * time.Hour - - remoteAddr, err := g.setURLToken(repo.CloneURL) +// DumpRepository dump repository according MigrateOptions to a local directory +func DumpRepository(ctx context.Context, baseDir, ownerName string, opts base.MigrateOptions) error { + tmpDir, err := os.MkdirTemp(os.TempDir(), "migrate") if err != nil { return err } + defer os.RemoveAll(tmpDir) - err = git.Clone(g.ctx, remoteAddr, repoPath, git.CloneRepoOptions{ - Mirror: true, - Quiet: true, - Timeout: migrateTimeout, - SkipTLSVerify: setting.Migrations.SkipTLSVerify, - }) - if err != nil { - return fmt.Errorf("Clone: %v", err) - } - if err := git.WriteCommitGraph(g.ctx, repoPath); err != nil { - return err - } - - if opts.Wiki { - wikiPath := g.wikiPath() - wikiRemotePath := repository.WikiRemoteURL(g.ctx, remoteAddr) - if len(wikiRemotePath) > 0 { - if err := os.MkdirAll(wikiPath, os.ModePerm); err != nil { - return fmt.Errorf("Failed to remove %s: %v", wikiPath, err) - } - - if err := git.Clone(g.ctx, wikiRemotePath, wikiPath, git.CloneRepoOptions{ - Mirror: true, - Quiet: true, - Timeout: migrateTimeout, - Branch: "master", - SkipTLSVerify: setting.Migrations.SkipTLSVerify, - }); err != nil { - log.Warn("Clone wiki: %v", err) - if err := os.RemoveAll(wikiPath); err != nil { - return fmt.Errorf("Failed to remove %s: %v", wikiPath, err) - } - } else if err := git.WriteCommitGraph(g.ctx, wikiPath); err != nil { - return err - } - } - } - - g.gitRepo, err = git.OpenRepository(g.ctx, g.gitPath()) - return err -} - -// Close closes this uploader -func (g *RepositoryDumper) Close() { - if g.gitRepo != nil { - g.gitRepo.Close() - } - if g.milestoneFile != nil { - g.milestoneFile.Close() - } - if g.labelFile != nil { - 
g.labelFile.Close() - } - if g.releaseFile != nil { - g.releaseFile.Close() - } - if g.issueFile != nil { - g.issueFile.Close() - } - for _, f := range g.commentFiles { - f.Close() - } - if g.pullrequestFile != nil { - g.pullrequestFile.Close() - } - for _, f := range g.reviewFiles { - f.Close() - } -} - -// CreateTopics creates topics -func (g *RepositoryDumper) CreateTopics(topics ...string) error { - f, err := os.Create(filepath.Join(g.baseDir, "topic.yml")) + downloader, err := newDownloader(ctx, ownerName, tmpDir, opts, nil) if err != nil { return err } - defer f.Close() - bs, err := yaml.Marshal(map[string]interface{}{ - "topics": topics, + uploader, err := gofff_forges.NewForge(&gofff_file.Options{ + Options: gofff.Options{ + Configuration: gofff.Configuration{ + Directory: baseDir, + }, + Logger: ToGofffLogger(nil), + Features: opts.ToGofffFeatures(), + }, }) if err != nil { return err } + uploader.SetContext(ctx) - if _, err := f.Write(bs); err != nil { - return err - } - - return nil -} - -// CreateMilestones creates milestones -func (g *RepositoryDumper) CreateMilestones(milestones ...*base.Milestone) error { - var err error - if g.milestoneFile == nil { - g.milestoneFile, err = os.Create(filepath.Join(g.baseDir, "milestone.yml")) - if err != nil { - return err - } - } - - bs, err := yaml.Marshal(milestones) - if err != nil { - return err - } - - if _, err := g.milestoneFile.Write(bs); err != nil { - return err - } - - return nil -} - -// CreateLabels creates labels -func (g *RepositoryDumper) CreateLabels(labels ...*base.Label) error { - var err error - if g.labelFile == nil { - g.labelFile, err = os.Create(filepath.Join(g.baseDir, "label.yml")) - if err != nil { - return err - } - } - - bs, err := yaml.Marshal(labels) - if err != nil { - return err - } - - if _, err := g.labelFile.Write(bs); err != nil { - return err - } - - return nil -} - -// CreateReleases creates releases -func (g *RepositoryDumper) CreateReleases(releases ...*base.Release) error { 
- if g.opts.ReleaseAssets { - for _, release := range releases { - attachDir := filepath.Join("release_assets", release.TagName) - if err := os.MkdirAll(filepath.Join(g.baseDir, attachDir), os.ModePerm); err != nil { - return err - } - for _, asset := range release.Assets { - attachLocalPath := filepath.Join(attachDir, asset.Name) - // download attachment - - err := func(attachPath string) error { - var rc io.ReadCloser - var err error - if asset.DownloadURL == nil { - rc, err = asset.DownloadFunc() - if err != nil { - return err - } - } else { - resp, err := http.Get(*asset.DownloadURL) - if err != nil { - return err - } - rc = resp.Body - } - defer rc.Close() - - fw, err := os.Create(attachPath) - if err != nil { - return fmt.Errorf("Create: %v", err) - } - defer fw.Close() - - _, err = io.Copy(fw, rc) - return err - }(filepath.Join(g.baseDir, attachLocalPath)) - if err != nil { - return err - } - asset.DownloadURL = &attachLocalPath // to save the filepath on the yml file, change the source - } - } - } - - var err error - if g.releaseFile == nil { - g.releaseFile, err = os.Create(filepath.Join(g.baseDir, "release.yml")) - if err != nil { - return err - } - } - - bs, err := yaml.Marshal(releases) - if err != nil { - return err - } - - if _, err := g.releaseFile.Write(bs); err != nil { - return err - } - - return nil -} - -// SyncTags syncs releases with tags in the database -func (g *RepositoryDumper) SyncTags() error { - return nil -} - -// CreateIssues creates issues -func (g *RepositoryDumper) CreateIssues(issues ...*base.Issue) error { - var err error - if g.issueFile == nil { - g.issueFile, err = os.Create(filepath.Join(g.baseDir, "issue.yml")) - if err != nil { - return err - } - } - - bs, err := yaml.Marshal(issues) - if err != nil { - return err - } - - if _, err := g.issueFile.Write(bs); err != nil { - return err - } - - return nil -} - -func (g *RepositoryDumper) createItems(dir string, itemFiles map[int64]*os.File, itemsMap map[int64][]interface{}) 
error { - if err := os.MkdirAll(dir, os.ModePerm); err != nil { - return err - } - - for number, items := range itemsMap { - var err error - itemFile := itemFiles[number] - if itemFile == nil { - itemFile, err = os.Create(filepath.Join(dir, fmt.Sprintf("%d.yml", number))) - if err != nil { - return err - } - itemFiles[number] = itemFile - } - - bs, err := yaml.Marshal(items) - if err != nil { - return err - } - - if _, err := itemFile.Write(bs); err != nil { - return err - } - } - - return nil -} - -// CreateComments creates comments of issues -func (g *RepositoryDumper) CreateComments(comments ...*base.Comment) error { - commentsMap := make(map[int64][]interface{}, len(comments)) - for _, comment := range comments { - commentsMap[comment.IssueIndex] = append(commentsMap[comment.IssueIndex], comment) - } - - return g.createItems(g.commentDir(), g.commentFiles, commentsMap) + return gofff_domain.Migrate(ctx, downloader, uploader, ToGofffLogger(nil), opts.ToGofffFeatures()) } -// CreatePullRequests creates pull requests -func (g *RepositoryDumper) CreatePullRequests(prs ...*base.PullRequest) error { - for _, pr := range prs { - // download patch file - err := func() error { - u, err := g.setURLToken(pr.PatchURL) - if err != nil { - return err - } - resp, err := http.Get(u) - if err != nil { - return err - } - defer resp.Body.Close() - pullDir := filepath.Join(g.gitPath(), "pulls") - if err = os.MkdirAll(pullDir, os.ModePerm); err != nil { - return err - } - fPath := filepath.Join(pullDir, fmt.Sprintf("%d.patch", pr.Number)) - f, err := os.Create(fPath) - if err != nil { - return err - } - defer f.Close() - if _, err = io.Copy(f, resp.Body); err != nil { - return err - } - pr.PatchURL = "git/pulls/" + fmt.Sprintf("%d.patch", pr.Number) - - return nil - }() - if err != nil { - return err - } - - // set head information - pullHead := filepath.Join(g.gitPath(), "refs", "pull", fmt.Sprintf("%d", pr.Number)) - if err := os.MkdirAll(pullHead, os.ModePerm); err != nil { - 
return err - } - p, err := os.Create(filepath.Join(pullHead, "head")) - if err != nil { - return err - } - _, err = p.WriteString(pr.Head.SHA) - p.Close() - if err != nil { - return err - } - - if pr.IsForkPullRequest() && pr.State != "closed" { - if pr.Head.OwnerName != "" { - remote := pr.Head.OwnerName - _, ok := g.prHeadCache[remote] - if !ok { - // git remote add - // TODO: how to handle private CloneURL? - err := g.gitRepo.AddRemote(remote, pr.Head.CloneURL, true) - if err != nil { - log.Error("AddRemote failed: %s", err) - } else { - g.prHeadCache[remote] = struct{}{} - ok = true - } - } - - if ok { - _, _, err = git.NewCommand(g.ctx, "fetch", remote, pr.Head.Ref).RunStdString(&git.RunOpts{Dir: g.gitPath()}) - if err != nil { - log.Error("Fetch branch from %s failed: %v", pr.Head.CloneURL, err) - } else { - // a new branch name with will be created to as new head branch - ref := path.Join(pr.Head.OwnerName, pr.Head.Ref) - headBranch := filepath.Join(g.gitPath(), "refs", "heads", ref) - if err := os.MkdirAll(filepath.Dir(headBranch), os.ModePerm); err != nil { - return err - } - b, err := os.Create(headBranch) - if err != nil { - return err - } - _, err = b.WriteString(pr.Head.SHA) - b.Close() - if err != nil { - return err - } - pr.Head.Ref = ref - } - } - } - } - // whatever it's a forked repo PR, we have to change head info as the same as the base info - pr.Head.OwnerName = pr.Base.OwnerName - pr.Head.RepoName = pr.Base.RepoName - } - - var err error - if g.pullrequestFile == nil { - if err := os.MkdirAll(g.baseDir, os.ModePerm); err != nil { - return err - } - g.pullrequestFile, err = os.Create(filepath.Join(g.baseDir, "pull_request.yml")) - if err != nil { - return err - } - } - - bs, err := yaml.Marshal(prs) +// RestoreRepository restore a repository from the disk directory +func RestoreRepository(ctx context.Context, baseDir, ownerName, repoName string, units []string, validation bool) error { + // + // Uploader + // + doer, err := 
user_model.GetAdminUser() if err != nil { return err } - - if _, err := g.pullrequestFile.Write(bs); err != nil { - return err - } - - return nil -} - -// CreateReviews create pull request reviews -func (g *RepositoryDumper) CreateReviews(reviews ...*base.Review) error { - reviewsMap := make(map[int64][]interface{}, len(reviews)) - for _, review := range reviews { - reviewsMap[review.IssueIndex] = append(reviewsMap[review.IssueIndex], review) + serviceType := structs.GiteaService + opts := base.MigrateOptions{ + RepoName: repoName, + GitServiceType: serviceType, } + updateOptionsUnits(&opts, units) + uploader := NewGiteaLocalUploader(ctx, doer, ownerName, opts) - return g.createItems(g.reviewDir(), g.reviewFiles, reviewsMap) -} - -// Rollback when migrating failed, this will rollback all the changes. -func (g *RepositoryDumper) Rollback() error { - g.Close() - return os.RemoveAll(g.baseDir) -} - -// Finish when migrating succeed, this will update something. -func (g *RepositoryDumper) Finish() error { - return nil -} - -// DumpRepository dump repository according MigrateOptions to a local directory -func DumpRepository(ctx context.Context, baseDir, ownerName string, opts base.MigrateOptions) error { - downloader, err := newDownloader(ctx, ownerName, opts) - if err != nil { - return err - } - uploader, err := NewRepositoryDumper(ctx, baseDir, ownerName, opts.RepoName, opts) + // + // Downloader + // + downloader, err := gofff_forges.NewForge(&gofff_file.Options{ + Options: gofff.Options{ + Configuration: gofff.Configuration{ + Directory: baseDir, + }, + Logger: ToGofffLogger(nil), + Features: opts.ToGofffFeatures(), + }, + Validation: validation, + }) if err != nil { return err } + uploader.SetContext(ctx) - if err := migrateRepository(downloader, uploader, opts, nil); err != nil { - if err1 := uploader.Rollback(); err1 != nil { - log.Error("rollback failed: %v", err1) - } + // + // Restore what is read from file to the local Gitea instance + // + if err := 
gofff_domain.Migrate(ctx, downloader, uploader, ToGofffLogger(nil), opts.ToGofffFeatures()); err != nil { return err } - return nil + return updateMigrationPosterIDByGitService(ctx, serviceType) } -func updateOptionsUnits(opts *base.MigrateOptions, units []string) error { +func updateOptionsUnits(opts *base.MigrateOptions, units []string) { if len(units) == 0 { opts.Wiki = true opts.Issues = true @@ -609,43 +124,7 @@ func updateOptionsUnits(opts *base.MigrateOptions, units []string) error { opts.Comments = true case "pull_requests": opts.PullRequests = true - default: - return errors.New("invalid unit: " + unit) } } } - return nil -} - -// RestoreRepository restore a repository from the disk directory -func RestoreRepository(ctx context.Context, baseDir, ownerName, repoName string, units []string, validation bool) error { - doer, err := user_model.GetAdminUser() - if err != nil { - return err - } - uploader := NewGiteaLocalUploader(ctx, doer, ownerName, repoName) - downloader, err := NewRepositoryRestorer(ctx, baseDir, ownerName, repoName, validation) - if err != nil { - return err - } - opts, err := downloader.getRepoOptions() - if err != nil { - return err - } - tp, _ := strconv.Atoi(opts["service_type"]) - - migrateOpts := base.MigrateOptions{ - GitServiceType: structs.GitServiceType(tp), - } - if err := updateOptionsUnits(&migrateOpts, units); err != nil { - return err - } - - if err = migrateRepository(downloader, uploader, migrateOpts, nil); err != nil { - if err1 := uploader.Rollback(); err1 != nil { - log.Error("rollback failed: %v", err1) - } - return err - } - return updateMigrationPosterIDByGitService(ctx, structs.GitServiceType(tp)) } diff --git a/services/migrations/error.go b/services/migrations/error.go index d26fa8112cbf..6b1f3fa71177 100644 --- a/services/migrations/error.go +++ b/services/migrations/error.go @@ -7,21 +7,7 @@ package migrations import ( "errors" - - "github.com/google/go-github/v45/github" ) // ErrRepoNotCreated returns the error 
that repository not created var ErrRepoNotCreated = errors.New("repository is not created yet") - -// IsRateLimitError returns true if the err is github.RateLimitError -func IsRateLimitError(err error) bool { - _, ok := err.(*github.RateLimitError) - return ok -} - -// IsTwoFactorAuthError returns true if the err is github.TwoFactorAuthError -func IsTwoFactorAuthError(err error) bool { - _, ok := err.(*github.TwoFactorAuthError) - return ok -} diff --git a/services/migrations/gitea_uploader.go b/services/migrations/gitea_uploader.go index c7a6f9b02f2c..ad501cf16e2c 100644 --- a/services/migrations/gitea_uploader.go +++ b/services/migrations/gitea_uploader.go @@ -9,15 +9,11 @@ import ( "context" "fmt" "io" - "os" - "path/filepath" - "strconv" "strings" "time" "code.gitea.io/gitea/models" "code.gitea.io/gitea/models/db" - "code.gitea.io/gitea/models/foreignreference" issues_model "code.gitea.io/gitea/models/issues" repo_model "code.gitea.io/gitea/models/repo" user_model "code.gitea.io/gitea/models/user" @@ -29,16 +25,21 @@ import ( "code.gitea.io/gitea/modules/storage" "code.gitea.io/gitea/modules/structs" "code.gitea.io/gitea/modules/timeutil" - "code.gitea.io/gitea/modules/uri" "code.gitea.io/gitea/services/pull" gouuid "github.com/google/uuid" + gofff "lab.forgefriends.org/friendlyforgeformat/gofff" + gofff_gitea "lab.forgefriends.org/friendlyforgeformat/gofff/forges/gitea" + gofff_null "lab.forgefriends.org/friendlyforgeformat/gofff/forges/null" + gofff_format "lab.forgefriends.org/friendlyforgeformat/gofff/format" ) -var _ base.Uploader = &GiteaLocalUploader{} +var _ gofff.ForgeInterface = &GiteaLocalUploader{} // GiteaLocalUploader implements an Uploader to gitea sites type GiteaLocalUploader struct { + gofff_null.Null + opts base.MigrateOptions ctx context.Context doer *user_model.User repoOwner string @@ -48,7 +49,7 @@ type GiteaLocalUploader struct { milestones map[string]int64 issues map[int64]*issues_model.Issue gitRepo *git.Repository - prHeadCache 
map[string]struct{} + prHeadCache gofff_gitea.PrHeadCache sameApp bool userMap map[int64]int64 // external user id mapping to user id prCache map[int64]*issues_model.PullRequest @@ -56,21 +57,25 @@ type GiteaLocalUploader struct { } // NewGiteaLocalUploader creates an gitea Uploader via gitea API v1 -func NewGiteaLocalUploader(ctx context.Context, doer *user_model.User, repoOwner, repoName string) *GiteaLocalUploader { +func NewGiteaLocalUploader(ctx context.Context, doer *user_model.User, repoOwner string, opts base.MigrateOptions) *GiteaLocalUploader { return &GiteaLocalUploader{ + opts: opts, ctx: ctx, doer: doer, repoOwner: repoOwner, - repoName: repoName, + repoName: opts.RepoName, labels: make(map[string]*issues_model.Label), milestones: make(map[string]int64), issues: make(map[int64]*issues_model.Issue), - prHeadCache: make(map[string]struct{}), + prHeadCache: make(gofff_gitea.PrHeadCache), userMap: make(map[int64]int64), prCache: make(map[int64]*issues_model.PullRequest), } } +func (g *GiteaLocalUploader) SetContext(ctx context.Context) { +} + // MaxBatchInsertSize returns the table's max batch insert size func (g *GiteaLocalUploader) MaxBatchInsertSize(tp string) int { switch tp { @@ -91,54 +96,74 @@ func (g *GiteaLocalUploader) MaxBatchInsertSize(tp string) int { } // CreateRepo creates a repository -func (g *GiteaLocalUploader) CreateRepo(repo *base.Repository, opts base.MigrateOptions) error { +func (g *GiteaLocalUploader) CreateProject(project *gofff_format.Project) { owner, err := user_model.GetUserByName(g.ctx, g.repoOwner) if err != nil { - return err + panic(err) } var r *repo_model.Repository - if opts.MigrateToRepoID <= 0 { + if g.opts.MigrateToRepoID <= 0 { r, err = repo_module.CreateRepository(g.doer, owner, models.CreateRepoOptions{ Name: g.repoName, - Description: repo.Description, - OriginalURL: repo.OriginalURL, - GitServiceType: opts.GitServiceType, - IsPrivate: opts.Private, - IsMirror: opts.Mirror, + Description: project.Description, + 
OriginalURL: project.OriginalURL, + GitServiceType: g.opts.GitServiceType, + IsPrivate: g.opts.Private, + IsMirror: g.opts.Mirror, Status: repo_model.RepositoryBeingMigrated, + DefaultBranch: project.DefaultBranch, }) } else { - r, err = repo_model.GetRepositoryByID(opts.MigrateToRepoID) + r, err = repo_model.GetRepositoryByID(g.opts.MigrateToRepoID) } if err != nil { - return err + panic(err) } - r.DefaultBranch = repo.DefaultBranch - r.Description = repo.Description - - r, err = repo_module.MigrateRepositoryGitData(g.ctx, owner, r, base.MigrateOptions{ - RepoName: g.repoName, - Description: repo.Description, - OriginalURL: repo.OriginalURL, - GitServiceType: opts.GitServiceType, - Mirror: repo.IsMirror, - LFS: opts.LFS, - LFSEndpoint: opts.LFSEndpoint, - CloneAddr: repo.CloneURL, - Private: repo.IsPrivate, - Wiki: opts.Wiki, - Releases: opts.Releases, // if didn't get releases, then sync them from tags - MirrorInterval: opts.MirrorInterval, - }, NewMigrationHTTPTransport()) - - g.sameApp = strings.HasPrefix(repo.OriginalURL, setting.AppURL) - g.repo = r + r.Description = project.Description + + g.sameApp = strings.HasPrefix(project.OriginalURL, setting.AppURL) +} + +// CreateRepo creates a repository +func (g *GiteaLocalUploader) CreateRepositories(repositories ...*gofff_format.Repository) { + owner, err := user_model.GetUserByName(g.ctx, g.repoOwner) if err != nil { - return err + panic(err) + } + + r, err := repo_model.GetRepositoryByOwnerAndNameCtx(g.ctx, g.repoOwner, g.repoName) + if err != nil { + panic(err) + } + + for _, repository := range repositories { + switch repository.Name { + case gofff_format.RepositoryNameDefault: + r, err = repo_module.MigrateRepositoryGitData(g.ctx, owner, repository.Fetch, r, g.opts, NewMigrationHTTPTransport()) + + g.repo = r + if err != nil { + panic(err) + } + + g.gitRepo, err = git.OpenRepository(g.ctx, r.RepoPath()) + if err != nil { + panic(err) + } + + case gofff_format.RepositoryNameWiki: + if g.opts.Wiki { + err = 
repo_module.MigrateRepositoryGitDataWiki(g.ctx, owner, repository.Fetch, r, g.opts) + if err != nil { + panic(err) + } + } + + default: + panic(fmt.Errorf("unknown repository name %v", repository.Name)) + } } - g.gitRepo, err = git.OpenRepository(g.ctx, r.RepoPath()) - return err } // Close closes this uploader @@ -149,21 +174,21 @@ func (g *GiteaLocalUploader) Close() { } // CreateTopics creates topics -func (g *GiteaLocalUploader) CreateTopics(topics ...string) error { - // ignore topics to long for the db - c := 0 +func (g *GiteaLocalUploader) CreateTopics(topics ...*gofff_format.Topic) { + // ignore topics too long for the db + trimmedTopics := make([]string, 0, len(topics)) for i := range topics { - if len(topics[i]) <= 50 { - topics[c] = topics[i] - c++ + if len(topics[i].Name) <= 50 { + trimmedTopics = append(trimmedTopics, topics[i].Name) } } - topics = topics[:c] - return repo_model.SaveTopics(g.repo.ID, topics...) + if err := repo_model.SaveTopics(g.repo.ID, trimmedTopics...); err != nil { + panic(err) + } } // CreateMilestones creates milestones -func (g *GiteaLocalUploader) CreateMilestones(milestones ...*base.Milestone) error { +func (g *GiteaLocalUploader) CreateMilestones(milestones ...*gofff_format.Milestone) { mss := make([]*issues_model.Milestone, 0, len(milestones)) for _, milestone := range milestones { var deadline timeutil.TimeStamp @@ -204,17 +229,16 @@ func (g *GiteaLocalUploader) CreateMilestones(milestones ...*base.Milestone) err err := models.InsertMilestones(mss...) 
if err != nil { - return err + panic(err) } for _, ms := range mss { g.milestones[ms.Name] = ms.ID } - return nil } // CreateLabels creates labels -func (g *GiteaLocalUploader) CreateLabels(labels ...*base.Label) error { +func (g *GiteaLocalUploader) CreateLabels(labels ...*gofff_format.Label) { lbs := make([]*issues_model.Label, 0, len(labels)) for _, label := range labels { lbs = append(lbs, &issues_model.Label{ @@ -227,16 +251,15 @@ func (g *GiteaLocalUploader) CreateLabels(labels ...*base.Label) error { err := issues_model.NewLabels(lbs...) if err != nil { - return err + panic(err) } for _, lb := range lbs { g.labels[lb.Name] = lb } - return nil } // CreateReleases creates releases -func (g *GiteaLocalUploader) CreateReleases(releases ...*base.Release) error { +func (g *GiteaLocalUploader) CreateReleases(releases ...*gofff_format.Release) { rels := make([]*models.Release, 0, len(releases)) for _, release := range releases { if release.Created.IsZero() { @@ -261,7 +284,7 @@ func (g *GiteaLocalUploader) CreateReleases(releases ...*base.Release) error { } if err := g.remapUser(release, &rel); err != nil { - return err + panic(err) } // calc NumCommits if possible @@ -269,12 +292,12 @@ func (g *GiteaLocalUploader) CreateReleases(releases ...*base.Release) error { commit, err := g.gitRepo.GetTagCommit(rel.TagName) if !git.IsErrNotExist(err) { if err != nil { - return fmt.Errorf("GetTagCommit[%v]: %v", rel.TagName, err) + panic(fmt.Errorf("GetTagCommit[%v]: %v", rel.TagName, err)) } rel.Sha1 = commit.ID.String() rel.NumCommits, err = commit.CommitsCount() if err != nil { - return fmt.Errorf("CommitsCount: %v", err) + panic(fmt.Errorf("CommitsCount: %v", err)) } } } @@ -297,29 +320,16 @@ func (g *GiteaLocalUploader) CreateReleases(releases ...*base.Release) error { // download attachment err := func() error { - // asset.DownloadURL maybe a local file - var rc io.ReadCloser - var err error - if asset.DownloadFunc != nil { - rc, err = asset.DownloadFunc() - if err != 
nil { - return err - } - } else if asset.DownloadURL != nil { - rc, err = uri.Open(*asset.DownloadURL) - if err != nil { - return err - } - } + rc := asset.DownloadFunc() if rc == nil { return nil } - _, err = storage.Attachments.Save(attach.RelativePath(), rc, int64(*asset.Size)) + _, err := storage.Attachments.Save(attach.RelativePath(), rc, int64(*asset.Size)) rc.Close() return err }() if err != nil { - return err + panic(err) } rel.Attachments = append(rel.Attachments, &attach) @@ -328,7 +338,9 @@ func (g *GiteaLocalUploader) CreateReleases(releases ...*base.Release) error { rels = append(rels, &rel) } - return models.InsertReleases(rels...) + if err := models.InsertReleases(rels...); err != nil { + panic(err) + } } // SyncTags syncs releases with tags in the database @@ -337,7 +349,7 @@ func (g *GiteaLocalUploader) SyncTags() error { } // CreateIssues creates issues -func (g *GiteaLocalUploader) CreateIssues(issues ...*base.Issue) error { +func (g *GiteaLocalUploader) CreateIssues(issues ...*gofff_format.Issue) { iss := make([]*issues_model.Issue, 0, len(issues)) for _, issue := range issues { var labels []*issues_model.Label @@ -378,16 +390,10 @@ func (g *GiteaLocalUploader) CreateIssues(issues ...*base.Issue) error { Labels: labels, CreatedUnix: timeutil.TimeStamp(issue.Created.Unix()), UpdatedUnix: timeutil.TimeStamp(issue.Updated.Unix()), - ForeignReference: &foreignreference.ForeignReference{ - LocalIndex: issue.GetLocalIndex(), - ForeignIndex: strconv.FormatInt(issue.GetForeignIndex(), 10), - RepoID: g.repo.ID, - Type: foreignreference.TypeIssue, - }, } if err := g.remapUser(issue, &is); err != nil { - return err + panic(err) } if issue.Closed != nil { @@ -400,7 +406,7 @@ func (g *GiteaLocalUploader) CreateIssues(issues ...*base.Issue) error { CreatedUnix: timeutil.TimeStampNow(), } if err := g.remapUser(reaction, &res); err != nil { - return err + panic(err) } is.Reactions = append(is.Reactions, &res) } @@ -409,25 +415,23 @@ func (g *GiteaLocalUploader) 
CreateIssues(issues ...*base.Issue) error { if len(iss) > 0 { if err := models.InsertIssues(iss...); err != nil { - return err + panic(err) } for _, is := range iss { g.issues[is.Index] = is } } - - return nil } // CreateComments creates comments of issues -func (g *GiteaLocalUploader) CreateComments(comments ...*base.Comment) error { +func (g *GiteaLocalUploader) CreateComments(commentable gofff_format.Commentable, comments ...*gofff_format.Comment) { cms := make([]*issues_model.Comment, 0, len(comments)) for _, comment := range comments { var issue *issues_model.Issue issue, ok := g.issues[comment.IssueIndex] if !ok { - return fmt.Errorf("comment references non existent IssueIndex %d", comment.IssueIndex) + panic(fmt.Errorf("comment references non existent IssueIndex %d", comment.IssueIndex)) } if comment.Created.IsZero() { @@ -446,7 +450,7 @@ func (g *GiteaLocalUploader) CreateComments(comments ...*base.Comment) error { } if err := g.remapUser(comment, &cm); err != nil { - return err + panic(err) } // add reactions @@ -456,7 +460,7 @@ func (g *GiteaLocalUploader) CreateComments(comments ...*base.Comment) error { CreatedUnix: timeutil.TimeStampNow(), } if err := g.remapUser(reaction, &res); err != nil { - return err + panic(err) } cm.Reactions = append(cm.Reactions, &res) } @@ -465,141 +469,38 @@ func (g *GiteaLocalUploader) CreateComments(comments ...*base.Comment) error { } if len(cms) == 0 { - return nil + return + } + if err := models.InsertIssueComments(cms); err != nil { + panic(err) } - return models.InsertIssueComments(cms) } // CreatePullRequests creates pull requests -func (g *GiteaLocalUploader) CreatePullRequests(prs ...*base.PullRequest) error { +func (g *GiteaLocalUploader) CreatePullRequests(prs ...*gofff_format.PullRequest) { gprs := make([]*issues_model.PullRequest, 0, len(prs)) for _, pr := range prs { gpr, err := g.newPullRequest(pr) if err != nil { - return err + panic(err) } if err := g.remapUser(pr, gpr.Issue); err != nil { - return err + 
panic(err) } gprs = append(gprs, gpr) } if err := models.InsertPullRequests(gprs...); err != nil { - return err + panic(err) } for _, pr := range gprs { g.issues[pr.Issue.Index] = pr.Issue pull.AddToTaskQueue(pr) } - return nil } -func (g *GiteaLocalUploader) updateGitForPullRequest(pr *base.PullRequest) (head string, err error) { - // download patch file - err = func() error { - if pr.PatchURL == "" { - return nil - } - // pr.PatchURL maybe a local file - ret, err := uri.Open(pr.PatchURL) - if err != nil { - return err - } - defer ret.Close() - pullDir := filepath.Join(g.repo.RepoPath(), "pulls") - if err = os.MkdirAll(pullDir, os.ModePerm); err != nil { - return err - } - f, err := os.Create(filepath.Join(pullDir, fmt.Sprintf("%d.patch", pr.Number))) - if err != nil { - return err - } - defer f.Close() - _, err = io.Copy(f, ret) - return err - }() - if err != nil { - return "", err - } - - // set head information - pullHead := filepath.Join(g.repo.RepoPath(), "refs", "pull", fmt.Sprintf("%d", pr.Number)) - if err := os.MkdirAll(pullHead, os.ModePerm); err != nil { - return "", err - } - p, err := os.Create(filepath.Join(pullHead, "head")) - if err != nil { - return "", err - } - _, err = p.WriteString(pr.Head.SHA) - p.Close() - if err != nil { - return "", err - } - - head = "unknown repository" - if pr.IsForkPullRequest() && pr.State != "closed" { - if pr.Head.OwnerName != "" { - remote := pr.Head.OwnerName - _, ok := g.prHeadCache[remote] - if !ok { - // git remote add - err := g.gitRepo.AddRemote(remote, pr.Head.CloneURL, true) - if err != nil { - log.Error("AddRemote failed: %s", err) - } else { - g.prHeadCache[remote] = struct{}{} - ok = true - } - } - - if ok { - _, _, err = git.NewCommand(g.ctx, "fetch", "--no-tags", "--", remote, pr.Head.Ref).RunStdString(&git.RunOpts{Dir: g.repo.RepoPath()}) - if err != nil { - log.Error("Fetch branch from %s failed: %v", pr.Head.CloneURL, err) - } else { - headBranch := filepath.Join(g.repo.RepoPath(), "refs", "heads", 
pr.Head.OwnerName, pr.Head.Ref) - if err := os.MkdirAll(filepath.Dir(headBranch), os.ModePerm); err != nil { - return "", err - } - b, err := os.Create(headBranch) - if err != nil { - return "", err - } - _, err = b.WriteString(pr.Head.SHA) - b.Close() - if err != nil { - return "", err - } - head = pr.Head.OwnerName + "/" + pr.Head.Ref - } - } - } - } else { - head = pr.Head.Ref - // Ensure the closed PR SHA still points to an existing ref - _, _, err = git.NewCommand(g.ctx, "rev-list", "--quiet", "-1", pr.Head.SHA).RunStdString(&git.RunOpts{Dir: g.repo.RepoPath()}) - if err != nil { - if pr.Head.SHA != "" { - // Git update-ref remove bad references with a relative path - log.Warn("Deprecated local head, removing : %v", pr.Head.SHA) - err = g.gitRepo.RemoveReference(pr.GetGitRefName()) - } else { - // The SHA is empty, remove the head file - log.Warn("Empty reference, removing : %v", pullHead) - err = os.Remove(filepath.Join(pullHead, "head")) - } - if err != nil { - log.Error("Cannot remove local head ref, %v", err) - } - } - } - - return head, nil -} - -func (g *GiteaLocalUploader) newPullRequest(pr *base.PullRequest) (*issues_model.PullRequest, error) { +func (g *GiteaLocalUploader) newPullRequest(pr *gofff_format.PullRequest) (*issues_model.PullRequest, error) { var labels []*issues_model.Label for _, label := range pr.Labels { lb, ok := g.labels[label.Name] @@ -610,9 +511,10 @@ func (g *GiteaLocalUploader) newPullRequest(pr *base.PullRequest) (*issues_model milestoneID := g.milestones[pr.Milestone] - head, err := g.updateGitForPullRequest(pr) - if err != nil { - return nil, fmt.Errorf("updateGitForPullRequest: %w", err) + _ = pr.Fetch(g.repo.RepoPath()) + head, messages := gofff_gitea.UpdateGitForPullRequest(g.ctx, &g.prHeadCache, pr, g.repo.RepoPath()) + for _, message := range messages { + log.Error(message) } if pr.Created.IsZero() { @@ -687,15 +589,15 @@ func (g *GiteaLocalUploader) newPullRequest(pr *base.PullRequest) (*issues_model func 
convertReviewState(state string) issues_model.ReviewType { switch state { - case base.ReviewStatePending: + case gofff_format.ReviewStatePending: return issues_model.ReviewTypePending - case base.ReviewStateApproved: + case gofff_format.ReviewStateApproved: return issues_model.ReviewTypeApprove - case base.ReviewStateChangesRequested: + case gofff_format.ReviewStateChangesRequested: return issues_model.ReviewTypeReject - case base.ReviewStateCommented: + case gofff_format.ReviewStateCommented: return issues_model.ReviewTypeComment - case base.ReviewStateRequestReview: + case gofff_format.ReviewStateRequestReview: return issues_model.ReviewTypeRequest default: return issues_model.ReviewTypePending @@ -703,13 +605,13 @@ func convertReviewState(state string) issues_model.ReviewType { } // CreateReviews create pull request reviews of currently migrated issues -func (g *GiteaLocalUploader) CreateReviews(reviews ...*base.Review) error { +func (g *GiteaLocalUploader) CreateReviews(reviewable gofff_format.Reviewable, reviews ...*gofff_format.Review) { cms := make([]*issues_model.Review, 0, len(reviews)) for _, review := range reviews { var issue *issues_model.Issue issue, ok := g.issues[review.IssueIndex] if !ok { - return fmt.Errorf("review references non existent IssueIndex %d", review.IssueIndex) + panic(fmt.Errorf("review references non existent IssueIndex %d", review.IssueIndex)) } if review.CreatedAt.IsZero() { review.CreatedAt = time.Unix(int64(issue.CreatedUnix), 0) @@ -725,7 +627,7 @@ func (g *GiteaLocalUploader) CreateReviews(reviews ...*base.Review) error { } if err := g.remapUser(review, &cm); err != nil { - return err + panic(err) } // get pr @@ -734,7 +636,7 @@ func (g *GiteaLocalUploader) CreateReviews(reviews ...*base.Review) error { var err error pr, err = issues_model.GetPullRequestByIssueIDWithNoAttributes(issue.ID) if err != nil { - return err + panic(err) } g.prCache[issue.ID] = pr } @@ -758,7 +660,7 @@ func (g *GiteaLocalUploader) 
CreateReviews(reviews ...*base.Review) error { _ = reader.Close() _ = writer.Close() }() - go func(comment *base.ReviewComment) { + go func(comment *gofff_format.ReviewComment) { if err := git.GetRepoRawDiffForFile(g.gitRepo, pr.MergeBase, headCommitID, git.RawDiffNormal, comment.TreePath, writer); err != nil { // We should ignore the error since the commit maybe removed when force push to the pull request log.Warn("GetRepoRawDiffForFile failed when migrating [%s, %s, %s, %s]: %v", g.gitRepo.Path, pr.MergeBase, headCommitID, comment.TreePath, err) @@ -788,7 +690,7 @@ func (g *GiteaLocalUploader) CreateReviews(reviews ...*base.Review) error { } if err := g.remapUser(review, &c); err != nil { - return err + panic(err) } cm.Comments = append(cm.Comments, &c) @@ -797,37 +699,40 @@ func (g *GiteaLocalUploader) CreateReviews(reviews ...*base.Review) error { cms = append(cms, &cm) } - return issues_model.InsertReviews(cms) + if err := issues_model.InsertReviews(cms); err != nil { + panic(err) + } } // Rollback when migrating failed, this will rollback all the changes. -func (g *GiteaLocalUploader) Rollback() error { +func (g *GiteaLocalUploader) Rollback() { if g.repo != nil && g.repo.ID > 0 { g.gitRepo.Close() if err := models.DeleteRepository(g.doer, g.repo.OwnerID, g.repo.ID); err != nil { - return err + panic(err) } } - return nil } // Finish when migrating success, this will do some status update things. 
-func (g *GiteaLocalUploader) Finish() error { +func (g *GiteaLocalUploader) Finish() { if g.repo == nil || g.repo.ID <= 0 { - return ErrRepoNotCreated + panic(ErrRepoNotCreated) } // update issue_index if err := issues_model.RecalculateIssueIndexForRepo(g.repo.ID); err != nil { - return err + panic(err) } if err := models.UpdateRepoStats(g.ctx, g.repo.ID); err != nil { - return err + panic(err) } g.repo.Status = repo_model.RepositoryReady - return repo_model.UpdateRepositoryCols(g.ctx, g.repo, "status") + if err := repo_model.UpdateRepositoryCols(g.ctx, g.repo, "status"); err != nil { + panic(err) + } } func (g *GiteaLocalUploader) remapUser(source user_model.ExternalUserMigrated, target user_model.ExternalUserRemappable) error { diff --git a/services/migrations/gitea_uploader_test.go b/services/migrations/gitea_uploader_test.go index 6ea1c20592b2..21f4e55946e6 100644 --- a/services/migrations/gitea_uploader_test.go +++ b/services/migrations/gitea_uploader_test.go @@ -7,137 +7,31 @@ package migrations import ( "context" - "fmt" - "os" - "path/filepath" "strconv" - "strings" "testing" - "time" "code.gitea.io/gitea/models" - "code.gitea.io/gitea/models/db" - issues_model "code.gitea.io/gitea/models/issues" - repo_model "code.gitea.io/gitea/models/repo" "code.gitea.io/gitea/models/unittest" user_model "code.gitea.io/gitea/models/user" - "code.gitea.io/gitea/modules/git" - "code.gitea.io/gitea/modules/graceful" - "code.gitea.io/gitea/modules/log" base "code.gitea.io/gitea/modules/migration" "code.gitea.io/gitea/modules/structs" - "code.gitea.io/gitea/modules/util" "github.com/stretchr/testify/assert" + gofff_format "lab.forgefriends.org/friendlyforgeformat/gofff/format" ) -func TestGiteaUploadRepo(t *testing.T) { - // FIXME: Since no accesskey or user/password will trigger rate limit of github, just skip - t.Skip() - - unittest.PrepareTestEnv(t) - - user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1}).(*user_model.User) - - var ( - ctx = 
context.Background() - downloader = NewGithubDownloaderV3(ctx, "https://github.com", "", "", "", "go-xorm", "builder") - repoName = "builder-" + time.Now().Format("2006-01-02-15-04-05") - uploader = NewGiteaLocalUploader(graceful.GetManager().HammerContext(), user, user.Name, repoName) - ) - - err := migrateRepository(downloader, uploader, base.MigrateOptions{ - CloneAddr: "https://github.com/go-xorm/builder", - RepoName: repoName, - AuthUsername: "", - - Wiki: true, - Issues: true, - Milestones: true, - Labels: true, - Releases: true, - Comments: true, - PullRequests: true, - Private: true, - Mirror: false, - }, nil) - assert.NoError(t, err) - - repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{OwnerID: user.ID, Name: repoName}).(*repo_model.Repository) - assert.True(t, repo.HasWiki()) - assert.EqualValues(t, repo_model.RepositoryReady, repo.Status) - - milestones, _, err := issues_model.GetMilestones(issues_model.GetMilestonesOption{ - RepoID: repo.ID, - State: structs.StateOpen, - }) - assert.NoError(t, err) - assert.Len(t, milestones, 1) - - milestones, _, err = issues_model.GetMilestones(issues_model.GetMilestonesOption{ - RepoID: repo.ID, - State: structs.StateClosed, - }) - assert.NoError(t, err) - assert.Empty(t, milestones) - - labels, err := issues_model.GetLabelsByRepoID(ctx, repo.ID, "", db.ListOptions{}) - assert.NoError(t, err) - assert.Len(t, labels, 12) - - releases, err := models.GetReleasesByRepoID(repo.ID, models.FindReleasesOptions{ - ListOptions: db.ListOptions{ - PageSize: 10, - Page: 0, - }, - IncludeTags: true, - }) - assert.NoError(t, err) - assert.Len(t, releases, 8) - - releases, err = models.GetReleasesByRepoID(repo.ID, models.FindReleasesOptions{ - ListOptions: db.ListOptions{ - PageSize: 10, - Page: 0, - }, - IncludeTags: false, - }) - assert.NoError(t, err) - assert.Len(t, releases, 1) - - issues, err := issues_model.Issues(&issues_model.IssuesOptions{ - RepoID: repo.ID, - IsPull: util.OptionalBoolFalse, - SortType: 
"oldest", - }) - assert.NoError(t, err) - assert.Len(t, issues, 15) - assert.NoError(t, issues[0].LoadDiscussComments()) - assert.Empty(t, issues[0].Comments) - - pulls, _, err := issues_model.PullRequests(repo.ID, &issues_model.PullRequestsOptions{ - SortType: "oldest", - }) - assert.NoError(t, err) - assert.Len(t, pulls, 30) - assert.NoError(t, pulls[0].LoadIssue()) - assert.NoError(t, pulls[0].Issue.LoadDiscussComments()) - assert.Len(t, pulls[0].Issue.Comments, 2) -} - func TestGiteaUploadRemapLocalUser(t *testing.T) { unittest.PrepareTestEnv(t) doer := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1}).(*user_model.User) user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2}).(*user_model.User) - repoName := "migrated" - uploader := NewGiteaLocalUploader(context.Background(), doer, doer.Name, repoName) + uploader := NewGiteaLocalUploader(context.Background(), doer, doer.Name, base.MigrateOptions{}) // call remapLocalUser uploader.sameApp = true externalID := int64(1234567) externalName := "username" - source := base.Release{ + source := gofff_format.Release{ PublisherID: externalID, PublisherName: externalName, } @@ -179,15 +73,14 @@ func TestGiteaUploadRemapExternalUser(t *testing.T) { unittest.PrepareTestEnv(t) doer := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 1}).(*user_model.User) - repoName := "migrated" - uploader := NewGiteaLocalUploader(context.Background(), doer, doer.Name, repoName) + uploader := NewGiteaLocalUploader(context.Background(), doer, doer.Name, base.MigrateOptions{}) uploader.gitServiceType = structs.GiteaService // call remapExternalUser uploader.sameApp = false externalID := int64(1234567) externalName := "username" - source := base.Release{ + source := gofff_format.Release{ PublisherID: externalID, PublisherName: externalName, } @@ -225,303 +118,3 @@ func TestGiteaUploadRemapExternalUser(t *testing.T) { assert.NoError(t, err) assert.EqualValues(t, linkedUser.ID, target.GetUserID()) } - -func 
TestGiteaUploadUpdateGitForPullRequest(t *testing.T) { - unittest.PrepareTestEnv(t) - - // - // fromRepo master - // - fromRepo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1}).(*repo_model.Repository) - baseRef := "master" - assert.NoError(t, git.InitRepository(git.DefaultContext, fromRepo.RepoPath(), false)) - err := git.NewCommand(git.DefaultContext, "symbolic-ref", "HEAD", git.BranchPrefix+baseRef).Run(&git.RunOpts{Dir: fromRepo.RepoPath()}) - assert.NoError(t, err) - assert.NoError(t, os.WriteFile(filepath.Join(fromRepo.RepoPath(), "README.md"), []byte(fmt.Sprintf("# Testing Repository\n\nOriginally created in: %s", fromRepo.RepoPath())), 0o644)) - assert.NoError(t, git.AddChanges(fromRepo.RepoPath(), true)) - signature := git.Signature{ - Email: "test@example.com", - Name: "test", - When: time.Now(), - } - assert.NoError(t, git.CommitChanges(fromRepo.RepoPath(), git.CommitChangesOptions{ - Committer: &signature, - Author: &signature, - Message: "Initial Commit", - })) - fromGitRepo, err := git.OpenRepository(git.DefaultContext, fromRepo.RepoPath()) - assert.NoError(t, err) - defer fromGitRepo.Close() - baseSHA, err := fromGitRepo.GetBranchCommitID(baseRef) - assert.NoError(t, err) - - // - // fromRepo branch1 - // - headRef := "branch1" - _, _, err = git.NewCommand(git.DefaultContext, "checkout", "-b", headRef).RunStdString(&git.RunOpts{Dir: fromRepo.RepoPath()}) - assert.NoError(t, err) - assert.NoError(t, os.WriteFile(filepath.Join(fromRepo.RepoPath(), "README.md"), []byte("SOMETHING"), 0o644)) - assert.NoError(t, git.AddChanges(fromRepo.RepoPath(), true)) - signature.When = time.Now() - assert.NoError(t, git.CommitChanges(fromRepo.RepoPath(), git.CommitChangesOptions{ - Committer: &signature, - Author: &signature, - Message: "Pull request", - })) - assert.NoError(t, err) - headSHA, err := fromGitRepo.GetBranchCommitID(headRef) - assert.NoError(t, err) - - fromRepoOwner := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 
fromRepo.OwnerID}).(*user_model.User) - - // - // forkRepo branch2 - // - forkHeadRef := "branch2" - forkRepo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 8}).(*repo_model.Repository) - assert.NoError(t, git.CloneWithArgs(git.DefaultContext, fromRepo.RepoPath(), forkRepo.RepoPath(), []string{}, git.CloneRepoOptions{ - Branch: headRef, - })) - _, _, err = git.NewCommand(git.DefaultContext, "checkout", "-b", forkHeadRef).RunStdString(&git.RunOpts{Dir: forkRepo.RepoPath()}) - assert.NoError(t, err) - assert.NoError(t, os.WriteFile(filepath.Join(forkRepo.RepoPath(), "README.md"), []byte(fmt.Sprintf("# branch2 %s", forkRepo.RepoPath())), 0o644)) - assert.NoError(t, git.AddChanges(forkRepo.RepoPath(), true)) - assert.NoError(t, git.CommitChanges(forkRepo.RepoPath(), git.CommitChangesOptions{ - Committer: &signature, - Author: &signature, - Message: "branch2 commit", - })) - forkGitRepo, err := git.OpenRepository(git.DefaultContext, forkRepo.RepoPath()) - assert.NoError(t, err) - defer forkGitRepo.Close() - forkHeadSHA, err := forkGitRepo.GetBranchCommitID(forkHeadRef) - assert.NoError(t, err) - - toRepoName := "migrated" - uploader := NewGiteaLocalUploader(context.Background(), fromRepoOwner, fromRepoOwner.Name, toRepoName) - uploader.gitServiceType = structs.GiteaService - assert.NoError(t, uploader.CreateRepo(&base.Repository{ - Description: "description", - OriginalURL: fromRepo.RepoPath(), - CloneURL: fromRepo.RepoPath(), - IsPrivate: false, - IsMirror: true, - }, base.MigrateOptions{ - GitServiceType: structs.GiteaService, - Private: false, - Mirror: true, - })) - - for _, testCase := range []struct { - name string - head string - assertContent func(t *testing.T, content string) - pr base.PullRequest - }{ - { - name: "fork, good Head.SHA", - head: fmt.Sprintf("%s/%s", forkRepo.OwnerName, forkHeadRef), - pr: base.PullRequest{ - PatchURL: "", - Number: 1, - State: "open", - Base: base.PullRequestBranch{ - CloneURL: fromRepo.RepoPath(), - Ref: 
baseRef, - SHA: baseSHA, - RepoName: fromRepo.Name, - OwnerName: fromRepo.OwnerName, - }, - Head: base.PullRequestBranch{ - CloneURL: forkRepo.RepoPath(), - Ref: forkHeadRef, - SHA: forkHeadSHA, - RepoName: forkRepo.Name, - OwnerName: forkRepo.OwnerName, - }, - }, - }, - { - name: "fork, invalid Head.Ref", - head: "unknown repository", - pr: base.PullRequest{ - PatchURL: "", - Number: 1, - State: "open", - Base: base.PullRequestBranch{ - CloneURL: fromRepo.RepoPath(), - Ref: baseRef, - SHA: baseSHA, - RepoName: fromRepo.Name, - OwnerName: fromRepo.OwnerName, - }, - Head: base.PullRequestBranch{ - CloneURL: forkRepo.RepoPath(), - Ref: "INVALID", - SHA: forkHeadSHA, - RepoName: forkRepo.Name, - OwnerName: forkRepo.OwnerName, - }, - }, - assertContent: func(t *testing.T, content string) { - assert.Contains(t, content, "Fetch branch from") - }, - }, - { - name: "invalid fork CloneURL", - head: "unknown repository", - pr: base.PullRequest{ - PatchURL: "", - Number: 1, - State: "open", - Base: base.PullRequestBranch{ - CloneURL: fromRepo.RepoPath(), - Ref: baseRef, - SHA: baseSHA, - RepoName: fromRepo.Name, - OwnerName: fromRepo.OwnerName, - }, - Head: base.PullRequestBranch{ - CloneURL: "UNLIKELY", - Ref: forkHeadRef, - SHA: forkHeadSHA, - RepoName: forkRepo.Name, - OwnerName: "WRONG", - }, - }, - assertContent: func(t *testing.T, content string) { - assert.Contains(t, content, "AddRemote failed") - }, - }, - { - name: "no fork, good Head.SHA", - head: headRef, - pr: base.PullRequest{ - PatchURL: "", - Number: 1, - State: "open", - Base: base.PullRequestBranch{ - CloneURL: fromRepo.RepoPath(), - Ref: baseRef, - SHA: baseSHA, - RepoName: fromRepo.Name, - OwnerName: fromRepo.OwnerName, - }, - Head: base.PullRequestBranch{ - CloneURL: fromRepo.RepoPath(), - Ref: headRef, - SHA: headSHA, - RepoName: fromRepo.Name, - OwnerName: fromRepo.OwnerName, - }, - }, - }, - { - name: "no fork, empty Head.SHA", - head: headRef, - pr: base.PullRequest{ - PatchURL: "", - Number: 1, - 
State: "open", - Base: base.PullRequestBranch{ - CloneURL: fromRepo.RepoPath(), - Ref: baseRef, - SHA: baseSHA, - RepoName: fromRepo.Name, - OwnerName: fromRepo.OwnerName, - }, - Head: base.PullRequestBranch{ - CloneURL: fromRepo.RepoPath(), - Ref: headRef, - SHA: "", - RepoName: fromRepo.Name, - OwnerName: fromRepo.OwnerName, - }, - }, - assertContent: func(t *testing.T, content string) { - assert.Contains(t, content, "Empty reference, removing") - assert.NotContains(t, content, "Cannot remove local head") - }, - }, - { - name: "no fork, invalid Head.SHA", - head: headRef, - pr: base.PullRequest{ - PatchURL: "", - Number: 1, - State: "open", - Base: base.PullRequestBranch{ - CloneURL: fromRepo.RepoPath(), - Ref: baseRef, - SHA: baseSHA, - RepoName: fromRepo.Name, - OwnerName: fromRepo.OwnerName, - }, - Head: base.PullRequestBranch{ - CloneURL: fromRepo.RepoPath(), - Ref: headRef, - SHA: "brokenSHA", - RepoName: fromRepo.Name, - OwnerName: fromRepo.OwnerName, - }, - }, - assertContent: func(t *testing.T, content string) { - assert.Contains(t, content, "Deprecated local head") - assert.Contains(t, content, "Cannot remove local head") - }, - }, - { - name: "no fork, not found Head.SHA", - head: headRef, - pr: base.PullRequest{ - PatchURL: "", - Number: 1, - State: "open", - Base: base.PullRequestBranch{ - CloneURL: fromRepo.RepoPath(), - Ref: baseRef, - SHA: baseSHA, - RepoName: fromRepo.Name, - OwnerName: fromRepo.OwnerName, - }, - Head: base.PullRequestBranch{ - CloneURL: fromRepo.RepoPath(), - Ref: headRef, - SHA: "2697b352310fcd01cbd1f3dbd43b894080027f68", - RepoName: fromRepo.Name, - OwnerName: fromRepo.OwnerName, - }, - }, - assertContent: func(t *testing.T, content string) { - assert.Contains(t, content, "Deprecated local head") - assert.NotContains(t, content, "Cannot remove local head") - }, - }, - } { - t.Run(testCase.name, func(t *testing.T) { - logger, ok := log.NamedLoggers.Load(log.DEFAULT) - assert.True(t, ok) - logger.SetLogger("buffer", "buffer", 
"{}") - defer logger.DelLogger("buffer") - - head, err := uploader.updateGitForPullRequest(&testCase.pr) - assert.NoError(t, err) - assert.EqualValues(t, testCase.head, head) - if testCase.assertContent != nil { - fence := fmt.Sprintf(">>>>>>>>>>>>>FENCE %s<<<<<<<<<<<<<<<", testCase.name) - log.Error(fence) - var content string - for i := 0; i < 5000; i++ { - content, err = logger.GetLoggerProviderContent("buffer") - assert.NoError(t, err) - if strings.Contains(content, fence) { - break - } - time.Sleep(1 * time.Millisecond) - } - testCase.assertContent(t, content) - } - }) - } -} diff --git a/services/migrations/main_test.go b/services/migrations/main_test.go index ad9bc9c731eb..a53b49aad399 100644 --- a/services/migrations/main_test.go +++ b/services/migrations/main_test.go @@ -1,5 +1,4 @@ -// Copyright 2019 The Gitea Authors. All rights reserved. -// Copyright 2018 Jonas Franz. All rights reserved. +// Copyright 2022 The Gitea Authors. All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE file. 
@@ -8,12 +7,8 @@ package migrations import ( "path/filepath" "testing" - "time" "code.gitea.io/gitea/models/unittest" - base "code.gitea.io/gitea/modules/migration" - - "github.com/stretchr/testify/assert" ) func TestMain(m *testing.M) { @@ -21,249 +16,3 @@ func TestMain(m *testing.M) { GiteaRootPath: filepath.Join("..", ".."), }) } - -func timePtr(t time.Time) *time.Time { - return &t -} - -func assertTimeEqual(t *testing.T, expected, actual time.Time) { - assert.Equal(t, expected.UTC(), actual.UTC()) -} - -func assertTimePtrEqual(t *testing.T, expected, actual *time.Time) { - if expected == nil { - assert.Nil(t, actual) - } else { - assert.NotNil(t, actual) - assertTimeEqual(t, *expected, *actual) - } -} - -func assertCommentEqual(t *testing.T, expected, actual *base.Comment) { - assert.Equal(t, expected.IssueIndex, actual.IssueIndex) - assert.Equal(t, expected.PosterID, actual.PosterID) - assert.Equal(t, expected.PosterName, actual.PosterName) - assert.Equal(t, expected.PosterEmail, actual.PosterEmail) - assertTimeEqual(t, expected.Created, actual.Created) - assertTimeEqual(t, expected.Updated, actual.Updated) - assert.Equal(t, expected.Content, actual.Content) - assertReactionsEqual(t, expected.Reactions, actual.Reactions) -} - -func assertCommentsEqual(t *testing.T, expected, actual []*base.Comment) { - if assert.Len(t, actual, len(expected)) { - for i := range expected { - assertCommentEqual(t, expected[i], actual[i]) - } - } -} - -func assertLabelEqual(t *testing.T, expected, actual *base.Label) { - assert.Equal(t, expected.Name, actual.Name) - assert.Equal(t, expected.Color, actual.Color) - assert.Equal(t, expected.Description, actual.Description) -} - -func assertLabelsEqual(t *testing.T, expected, actual []*base.Label) { - if assert.Len(t, actual, len(expected)) { - for i := range expected { - assertLabelEqual(t, expected[i], actual[i]) - } - } -} - -func assertMilestoneEqual(t *testing.T, expected, actual *base.Milestone) { - assert.Equal(t, 
expected.Title, actual.Title) - assert.Equal(t, expected.Description, actual.Description) - assertTimePtrEqual(t, expected.Deadline, actual.Deadline) - assertTimeEqual(t, expected.Created, actual.Created) - assertTimePtrEqual(t, expected.Updated, actual.Updated) - assertTimePtrEqual(t, expected.Closed, actual.Closed) - assert.Equal(t, expected.State, actual.State) -} - -func assertMilestonesEqual(t *testing.T, expected, actual []*base.Milestone) { - if assert.Len(t, actual, len(expected)) { - for i := range expected { - assertMilestoneEqual(t, expected[i], actual[i]) - } - } -} - -func assertIssueEqual(t *testing.T, expected, actual *base.Issue) { - assert.Equal(t, expected.Number, actual.Number) - assert.Equal(t, expected.PosterID, actual.PosterID) - assert.Equal(t, expected.PosterName, actual.PosterName) - assert.Equal(t, expected.PosterEmail, actual.PosterEmail) - assert.Equal(t, expected.Title, actual.Title) - assert.Equal(t, expected.Content, actual.Content) - assert.Equal(t, expected.Ref, actual.Ref) - assert.Equal(t, expected.Milestone, actual.Milestone) - assert.Equal(t, expected.State, actual.State) - assert.Equal(t, expected.IsLocked, actual.IsLocked) - assertTimeEqual(t, expected.Created, actual.Created) - assertTimeEqual(t, expected.Updated, actual.Updated) - assertTimePtrEqual(t, expected.Closed, actual.Closed) - assertLabelsEqual(t, expected.Labels, actual.Labels) - assertReactionsEqual(t, expected.Reactions, actual.Reactions) - assert.ElementsMatch(t, expected.Assignees, actual.Assignees) -} - -func assertIssuesEqual(t *testing.T, expected, actual []*base.Issue) { - if assert.Len(t, actual, len(expected)) { - for i := range expected { - assertIssueEqual(t, expected[i], actual[i]) - } - } -} - -func assertPullRequestEqual(t *testing.T, expected, actual *base.PullRequest) { - assert.Equal(t, expected.Number, actual.Number) - assert.Equal(t, expected.Title, actual.Title) - assert.Equal(t, expected.PosterID, actual.PosterID) - assert.Equal(t, 
expected.PosterName, actual.PosterName) - assert.Equal(t, expected.PosterEmail, actual.PosterEmail) - assert.Equal(t, expected.Content, actual.Content) - assert.Equal(t, expected.Milestone, actual.Milestone) - assert.Equal(t, expected.State, actual.State) - assertTimeEqual(t, expected.Created, actual.Created) - assertTimeEqual(t, expected.Updated, actual.Updated) - assertTimePtrEqual(t, expected.Closed, actual.Closed) - assertLabelsEqual(t, expected.Labels, actual.Labels) - assert.Equal(t, expected.PatchURL, actual.PatchURL) - assert.Equal(t, expected.Merged, actual.Merged) - assertTimePtrEqual(t, expected.MergedTime, actual.MergedTime) - assert.Equal(t, expected.MergeCommitSHA, actual.MergeCommitSHA) - assertPullRequestBranchEqual(t, expected.Head, actual.Head) - assertPullRequestBranchEqual(t, expected.Base, actual.Base) - assert.ElementsMatch(t, expected.Assignees, actual.Assignees) - assert.Equal(t, expected.IsLocked, actual.IsLocked) - assertReactionsEqual(t, expected.Reactions, actual.Reactions) -} - -func assertPullRequestsEqual(t *testing.T, expected, actual []*base.PullRequest) { - if assert.Len(t, actual, len(expected)) { - for i := range expected { - assertPullRequestEqual(t, expected[i], actual[i]) - } - } -} - -func assertPullRequestBranchEqual(t *testing.T, expected, actual base.PullRequestBranch) { - assert.Equal(t, expected.CloneURL, actual.CloneURL) - assert.Equal(t, expected.Ref, actual.Ref) - assert.Equal(t, expected.SHA, actual.SHA) - assert.Equal(t, expected.RepoName, actual.RepoName) - assert.Equal(t, expected.OwnerName, actual.OwnerName) -} - -func assertReactionEqual(t *testing.T, expected, actual *base.Reaction) { - assert.Equal(t, expected.UserID, actual.UserID) - assert.Equal(t, expected.UserName, actual.UserName) - assert.Equal(t, expected.Content, actual.Content) -} - -func assertReactionsEqual(t *testing.T, expected, actual []*base.Reaction) { - if assert.Len(t, actual, len(expected)) { - for i := range expected { - 
assertReactionEqual(t, expected[i], actual[i]) - } - } -} - -func assertReleaseAssetEqual(t *testing.T, expected, actual *base.ReleaseAsset) { - assert.Equal(t, expected.ID, actual.ID) - assert.Equal(t, expected.Name, actual.Name) - assert.Equal(t, expected.ContentType, actual.ContentType) - assert.Equal(t, expected.Size, actual.Size) - assert.Equal(t, expected.DownloadCount, actual.DownloadCount) - assertTimeEqual(t, expected.Created, actual.Created) - assertTimeEqual(t, expected.Updated, actual.Updated) - assert.Equal(t, expected.DownloadURL, actual.DownloadURL) -} - -func assertReleaseAssetsEqual(t *testing.T, expected, actual []*base.ReleaseAsset) { - if assert.Len(t, actual, len(expected)) { - for i := range expected { - assertReleaseAssetEqual(t, expected[i], actual[i]) - } - } -} - -func assertReleaseEqual(t *testing.T, expected, actual *base.Release) { - assert.Equal(t, expected.TagName, actual.TagName) - assert.Equal(t, expected.TargetCommitish, actual.TargetCommitish) - assert.Equal(t, expected.Name, actual.Name) - assert.Equal(t, expected.Body, actual.Body) - assert.Equal(t, expected.Draft, actual.Draft) - assert.Equal(t, expected.Prerelease, actual.Prerelease) - assert.Equal(t, expected.PublisherID, actual.PublisherID) - assert.Equal(t, expected.PublisherName, actual.PublisherName) - assert.Equal(t, expected.PublisherEmail, actual.PublisherEmail) - assertReleaseAssetsEqual(t, expected.Assets, actual.Assets) - assertTimeEqual(t, expected.Created, actual.Created) - assertTimeEqual(t, expected.Published, actual.Published) -} - -func assertReleasesEqual(t *testing.T, expected, actual []*base.Release) { - if assert.Len(t, actual, len(expected)) { - for i := range expected { - assertReleaseEqual(t, expected[i], actual[i]) - } - } -} - -func assertRepositoryEqual(t *testing.T, expected, actual *base.Repository) { - assert.Equal(t, expected.Name, actual.Name) - assert.Equal(t, expected.Owner, actual.Owner) - assert.Equal(t, expected.IsPrivate, actual.IsPrivate) 
- assert.Equal(t, expected.IsMirror, actual.IsMirror) - assert.Equal(t, expected.Description, actual.Description) - assert.Equal(t, expected.CloneURL, actual.CloneURL) - assert.Equal(t, expected.OriginalURL, actual.OriginalURL) - assert.Equal(t, expected.DefaultBranch, actual.DefaultBranch) -} - -func assertReviewEqual(t *testing.T, expected, actual *base.Review) { - assert.Equal(t, expected.ID, actual.ID, "ID") - assert.Equal(t, expected.IssueIndex, actual.IssueIndex, "IsssueIndex") - assert.Equal(t, expected.ReviewerID, actual.ReviewerID, "ReviewerID") - assert.Equal(t, expected.ReviewerName, actual.ReviewerName, "ReviewerName") - assert.Equal(t, expected.Official, actual.Official, "Official") - assert.Equal(t, expected.CommitID, actual.CommitID, "CommitID") - assert.Equal(t, expected.Content, actual.Content, "Content") - assert.WithinDuration(t, expected.CreatedAt, actual.CreatedAt, 10*time.Second) - assert.Equal(t, expected.State, actual.State, "State") - assertReviewCommentsEqual(t, expected.Comments, actual.Comments) -} - -func assertReviewsEqual(t *testing.T, expected, actual []*base.Review) { - if assert.Len(t, actual, len(expected)) { - for i := range expected { - assertReviewEqual(t, expected[i], actual[i]) - } - } -} - -func assertReviewCommentEqual(t *testing.T, expected, actual *base.ReviewComment) { - assert.Equal(t, expected.ID, actual.ID) - assert.Equal(t, expected.InReplyTo, actual.InReplyTo) - assert.Equal(t, expected.Content, actual.Content) - assert.Equal(t, expected.TreePath, actual.TreePath) - assert.Equal(t, expected.DiffHunk, actual.DiffHunk) - assert.Equal(t, expected.Position, actual.Position) - assert.Equal(t, expected.Line, actual.Line) - assert.Equal(t, expected.CommitID, actual.CommitID) - assert.Equal(t, expected.PosterID, actual.PosterID) - assertReactionsEqual(t, expected.Reactions, actual.Reactions) - assertTimeEqual(t, expected.CreatedAt, actual.CreatedAt) - assertTimeEqual(t, expected.UpdatedAt, actual.UpdatedAt) -} - -func 
assertReviewCommentsEqual(t *testing.T, expected, actual []*base.ReviewComment) { - if assert.Len(t, actual, len(expected)) { - for i := range expected { - assertReviewCommentEqual(t, expected[i], actual[i]) - } - } -} diff --git a/services/migrations/migrate.go b/services/migrations/migrate.go index f2542173a0ee..b5e5e28ce7f4 100644 --- a/services/migrations/migrate.go +++ b/services/migrations/migrate.go @@ -10,6 +10,7 @@ import ( "fmt" "net" "net/url" + "os" "path/filepath" "strings" @@ -21,24 +22,19 @@ import ( "code.gitea.io/gitea/modules/log" base "code.gitea.io/gitea/modules/migration" "code.gitea.io/gitea/modules/setting" + "code.gitea.io/gitea/modules/structs" "code.gitea.io/gitea/modules/util" + "lab.forgefriends.org/friendlyforgeformat/gofff" + gofff_domain "lab.forgefriends.org/friendlyforgeformat/gofff/domain" + gofff_forges "lab.forgefriends.org/friendlyforgeformat/gofff/forges" + gofff_gitea "lab.forgefriends.org/friendlyforgeformat/gofff/forges/gitea" ) -// MigrateOptions is equal to base.MigrateOptions -type MigrateOptions = base.MigrateOptions - var ( - factories []base.DownloaderFactory - allowList *hostmatcher.HostMatchList blockList *hostmatcher.HostMatchList ) -// RegisterDownloaderFactory registers a downloader factory -func RegisterDownloaderFactory(factory base.DownloaderFactory) { - factories = append(factories, factory) -} - // IsMigrateURLAllowed checks if an URL is allowed to be migrated from func IsMigrateURLAllowed(remoteURL string, doer *user_model.User) error { // Remote address can be HTTP/HTTPS/Git URL or local path. 
@@ -108,6 +104,22 @@ func checkByAllowBlockList(hostName string, addrList []net.IP) error { return blockedError } +func ToGofffLogger(messenger base.Messenger) gofff.Logger { + if messenger == nil { + messenger = func(string, ...interface{}) {} + } + return gofff.Logger{ + Message: messenger, + Trace: log.Trace, + Debug: log.Debug, + Info: log.Info, + Warn: log.Warn, + Error: log.Error, + Critical: log.Critical, + Fatal: log.Fatal, + } +} + // MigrateRepository migrate repository according MigrateOptions func MigrateRepository(ctx context.Context, doer *user_model.User, ownerName string, opts base.MigrateOptions, messenger base.Messenger) (*repo_model.Repository, error) { err := IsMigrateURLAllowed(opts.CloneAddr, doer) @@ -120,18 +132,23 @@ func MigrateRepository(ctx context.Context, doer *user_model.User, ownerName str return nil, err } } - downloader, err := newDownloader(ctx, ownerName, opts) + + tmpDir, err := os.MkdirTemp(os.TempDir(), "migrate") + if err != nil { + return nil, err + } + defer os.RemoveAll(tmpDir) + + downloader, err := newDownloader(ctx, ownerName, tmpDir, opts, messenger) if err != nil { return nil, err } - uploader := NewGiteaLocalUploader(ctx, doer, ownerName, opts.RepoName) + uploader := NewGiteaLocalUploader(ctx, doer, ownerName, opts) uploader.gitServiceType = opts.GitServiceType - if err := migrateRepository(downloader, uploader, opts, messenger); err != nil { - if err1 := uploader.Rollback(); err1 != nil { - log.Error("rollback failed: %v", err1) - } + if err := gofff_domain.Migrate(ctx, downloader, uploader, ToGofffLogger(messenger), opts.ToGofffFeatures()); err != nil { + uploader.Rollback() if err2 := admin_model.CreateRepositoryNotice(fmt.Sprintf("Migrate repository from %s failed: %v", opts.OriginalURL, err)); err2 != nil { log.Error("create respotiry notice failed: ", err2) } @@ -140,326 +157,29 @@ func MigrateRepository(ctx context.Context, doer *user_model.User, ownerName str return uploader.repo, nil } -func 
newDownloader(ctx context.Context, ownerName string, opts base.MigrateOptions) (base.Downloader, error) { - var ( - downloader base.Downloader - err error - ) - - for _, factory := range factories { - if factory.GitServiceType() == opts.GitServiceType { - downloader, err = factory.New(ctx, opts) - if err != nil { - return nil, err - } - break - } - } - - if downloader == nil { - opts.Wiki = true - opts.Milestones = false - opts.Labels = false - opts.Releases = false - opts.Comments = false - opts.Issues = false - opts.PullRequests = false - downloader = NewPlainGitDownloader(ownerName, opts.RepoName, opts.CloneAddr) - log.Trace("Will migrate from git: %s", opts.OriginalURL) - } - - if setting.Migrations.MaxAttempts > 1 { - downloader = base.NewRetryDownloader(ctx, downloader, setting.Migrations.MaxAttempts, setting.Migrations.RetryBackoff) - } - return downloader, nil -} - -// migrateRepository will download information and then upload it to Uploader, this is a simple -// process for small repository. 
For a big repository, save all the data to disk -// before upload is better -func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts base.MigrateOptions, messenger base.Messenger) error { - if messenger == nil { - messenger = base.NilMessenger - } - - repo, err := downloader.GetRepoInfo() - if err != nil { - if !base.IsErrNotSupported(err) { - return err - } - log.Info("migrating repo infos is not supported, ignored") - } - repo.IsPrivate = opts.Private - repo.IsMirror = opts.Mirror - if opts.Description != "" { - repo.Description = opts.Description - } - if repo.CloneURL, err = downloader.FormatCloneURL(opts, repo.CloneURL); err != nil { - return err - } - - log.Trace("migrating git data from %s", repo.CloneURL) - messenger("repo.migrate.migrating_git") - if err = uploader.CreateRepo(repo, opts); err != nil { - return err - } - defer uploader.Close() - - log.Trace("migrating topics") - messenger("repo.migrate.migrating_topics") - topics, err := downloader.GetTopics() - if err != nil { - if !base.IsErrNotSupported(err) { - return err - } - log.Warn("migrating topics is not supported, ignored") - } - if len(topics) != 0 { - if err = uploader.CreateTopics(topics...); err != nil { - return err - } - } - - if opts.Milestones { - log.Trace("migrating milestones") - messenger("repo.migrate.migrating_milestones") - milestones, err := downloader.GetMilestones() - if err != nil { - if !base.IsErrNotSupported(err) { - return err - } - log.Warn("migrating milestones is not supported, ignored") - } - - msBatchSize := uploader.MaxBatchInsertSize("milestone") - for len(milestones) > 0 { - if len(milestones) < msBatchSize { - msBatchSize = len(milestones) - } - - if err := uploader.CreateMilestones(milestones...); err != nil { - return err - } - milestones = milestones[msBatchSize:] - } +func newDownloader(ctx context.Context, ownerName, tmpDir string, opts base.MigrateOptions, messenger base.Messenger) (gofff.ForgeInterface, error) { + features := 
opts.ToGofffFeatures() + + switch opts.GitServiceType { + case structs.GiteaService: + options := gofff_gitea.Options{ + Options: gofff.Options{ + Configuration: gofff.Configuration{ + Directory: tmpDir, + NewMigrationHTTPClient: NewMigrationHTTPClient, + }, + Features: features, + Logger: ToGofffLogger(messenger), + }, + CloneAddr: opts.CloneAddr, + AuthUsername: opts.AuthUsername, + AuthToken: opts.AuthToken, + } + return gofff_forges.NewForge(&options) + default: + log.Error("Unrecognized %v", opts.GitServiceType) + return nil, fmt.Errorf("Unrecognized %v", opts.GitServiceType) } - - if opts.Labels { - log.Trace("migrating labels") - messenger("repo.migrate.migrating_labels") - labels, err := downloader.GetLabels() - if err != nil { - if !base.IsErrNotSupported(err) { - return err - } - log.Warn("migrating labels is not supported, ignored") - } - - lbBatchSize := uploader.MaxBatchInsertSize("label") - for len(labels) > 0 { - if len(labels) < lbBatchSize { - lbBatchSize = len(labels) - } - - if err := uploader.CreateLabels(labels...); err != nil { - return err - } - labels = labels[lbBatchSize:] - } - } - - if opts.Releases { - log.Trace("migrating releases") - messenger("repo.migrate.migrating_releases") - releases, err := downloader.GetReleases() - if err != nil { - if !base.IsErrNotSupported(err) { - return err - } - log.Warn("migrating releases is not supported, ignored") - } - - relBatchSize := uploader.MaxBatchInsertSize("release") - for len(releases) > 0 { - if len(releases) < relBatchSize { - relBatchSize = len(releases) - } - - if err = uploader.CreateReleases(releases[:relBatchSize]...); err != nil { - return err - } - releases = releases[relBatchSize:] - } - - // Once all releases (if any) are inserted, sync any remaining non-release tags - if err = uploader.SyncTags(); err != nil { - return err - } - } - - var ( - commentBatchSize = uploader.MaxBatchInsertSize("comment") - reviewBatchSize = uploader.MaxBatchInsertSize("review") - ) - - 
supportAllComments := downloader.SupportGetRepoComments() - - if opts.Issues { - log.Trace("migrating issues and comments") - messenger("repo.migrate.migrating_issues") - issueBatchSize := uploader.MaxBatchInsertSize("issue") - - for i := 1; ; i++ { - issues, isEnd, err := downloader.GetIssues(i, issueBatchSize) - if err != nil { - if !base.IsErrNotSupported(err) { - return err - } - log.Warn("migrating issues is not supported, ignored") - break - } - - if err := uploader.CreateIssues(issues...); err != nil { - return err - } - - if opts.Comments && !supportAllComments { - allComments := make([]*base.Comment, 0, commentBatchSize) - for _, issue := range issues { - log.Trace("migrating issue %d's comments", issue.Number) - comments, _, err := downloader.GetComments(issue) - if err != nil { - if !base.IsErrNotSupported(err) { - return err - } - log.Warn("migrating comments is not supported, ignored") - } - - allComments = append(allComments, comments...) - - if len(allComments) >= commentBatchSize { - if err = uploader.CreateComments(allComments[:commentBatchSize]...); err != nil { - return err - } - - allComments = allComments[commentBatchSize:] - } - } - - if len(allComments) > 0 { - if err = uploader.CreateComments(allComments...); err != nil { - return err - } - } - } - - if isEnd { - break - } - } - } - - if opts.PullRequests { - log.Trace("migrating pull requests and comments") - messenger("repo.migrate.migrating_pulls") - prBatchSize := uploader.MaxBatchInsertSize("pullrequest") - for i := 1; ; i++ { - prs, isEnd, err := downloader.GetPullRequests(i, prBatchSize) - if err != nil { - if !base.IsErrNotSupported(err) { - return err - } - log.Warn("migrating pull requests is not supported, ignored") - break - } - - if err := uploader.CreatePullRequests(prs...); err != nil { - return err - } - - if opts.Comments { - if !supportAllComments { - // plain comments - allComments := make([]*base.Comment, 0, commentBatchSize) - for _, pr := range prs { - 
log.Trace("migrating pull request %d's comments", pr.Number) - comments, _, err := downloader.GetComments(pr) - if err != nil { - if !base.IsErrNotSupported(err) { - return err - } - log.Warn("migrating comments is not supported, ignored") - } - - allComments = append(allComments, comments...) - - if len(allComments) >= commentBatchSize { - if err = uploader.CreateComments(allComments[:commentBatchSize]...); err != nil { - return err - } - allComments = allComments[commentBatchSize:] - } - } - if len(allComments) > 0 { - if err = uploader.CreateComments(allComments...); err != nil { - return err - } - } - } - - // migrate reviews - allReviews := make([]*base.Review, 0, reviewBatchSize) - for _, pr := range prs { - reviews, err := downloader.GetReviews(pr) - if err != nil { - if !base.IsErrNotSupported(err) { - return err - } - log.Warn("migrating reviews is not supported, ignored") - break - } - - allReviews = append(allReviews, reviews...) - - if len(allReviews) >= reviewBatchSize { - if err = uploader.CreateReviews(allReviews[:reviewBatchSize]...); err != nil { - return err - } - allReviews = allReviews[reviewBatchSize:] - } - } - if len(allReviews) > 0 { - if err = uploader.CreateReviews(allReviews...); err != nil { - return err - } - } - } - - if isEnd { - break - } - } - } - - if opts.Comments && supportAllComments { - log.Trace("migrating comments") - for i := 1; ; i++ { - comments, isEnd, err := downloader.GetAllComments(i, commentBatchSize) - if err != nil { - return err - } - - if err := uploader.CreateComments(comments...); err != nil { - return err - } - - if isEnd { - break - } - } - } - - return uploader.Finish() } // Init migrations service