// Copyright 2019 The Gitea Authors. All rights reserved.
// Copyright 2018 Jonas Franz. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package migrations

import (
	"context"
	"fmt"
	"net"
	"net/url"
	"path/filepath"
	"strings"

	"code.gitea.io/gitea/models"
	admin_model "code.gitea.io/gitea/models/admin"
	repo_model "code.gitea.io/gitea/models/repo"
	user_model "code.gitea.io/gitea/models/user"
	"code.gitea.io/gitea/modules/hostmatcher"
	"code.gitea.io/gitea/modules/log"
	base "code.gitea.io/gitea/modules/migration"
	"code.gitea.io/gitea/modules/setting"
	"code.gitea.io/gitea/modules/util"
)

// MigrateOptions is equal to base.MigrateOptions
type MigrateOptions = base.MigrateOptions

var (
	factories []base.DownloaderFactory

	allowList *hostmatcher.HostMatchList
	blockList *hostmatcher.HostMatchList
)

// RegisterDownloaderFactory registers a downloader factory
func RegisterDownloaderFactory(factory base.DownloaderFactory) {
	factories = append(factories, factory)
}
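
// Concrete downloaders register their factories through the hook above,
// typically from an init() in the provider package. A minimal sketch (the
// factory type name is illustrative, not defined in this file):
//
//	func init() {
//		RegisterDownloaderFactory(&GithubDownloaderV3Factory{})
//	}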

// IsMigrateURLAllowed checks whether a URL is allowed to be migrated from
func IsMigrateURLAllowed(remoteURL string, doer *user_model.User) error {
	// Remote address can be an HTTP/HTTPS/Git URL or a local path.
	u, err := url.Parse(remoteURL)
	if err != nil {
		return &models.ErrInvalidCloneAddr{IsURLError: true, Host: remoteURL}
	}

	if u.Scheme == "file" || u.Scheme == "" {
		if !doer.CanImportLocal() {
			return &models.ErrInvalidCloneAddr{Host: "<LOCAL_FILESYSTEM>", IsPermissionDenied: true, LocalPath: true}
		}
		isAbs := filepath.IsAbs(u.Host + u.Path)
		if !isAbs {
			return &models.ErrInvalidCloneAddr{Host: "<LOCAL_FILESYSTEM>", IsInvalidPath: true, LocalPath: true}
		}
		isDir, err := util.IsDir(u.Host + u.Path)
		if err != nil {
			log.Error("Unable to check if %s is a directory: %v", u.Host+u.Path, err)
			return err
		}
		if !isDir {
			return &models.ErrInvalidCloneAddr{Host: "<LOCAL_FILESYSTEM>", IsInvalidPath: true, LocalPath: true}
		}

		return nil
	}
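
	// The encoded CR/LF check below appears intended to prevent CRLF injection
	// into the git protocol via a crafted "git" URL with an explicit port.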
	if u.Scheme == "git" && u.Port() != "" && (strings.Contains(remoteURL, "%0d") || strings.Contains(remoteURL, "%0a")) {
		return &models.ErrInvalidCloneAddr{Host: u.Host, IsURLError: true}
	}

	if u.Opaque != "" || u.Scheme != "" && u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "git" {
		return &models.ErrInvalidCloneAddr{Host: u.Host, IsProtocolInvalid: true, IsPermissionDenied: true, IsURLError: true}
	}

	hostName, _, err := net.SplitHostPort(u.Host)
	if err != nil {
		// u.Host can be "host" or "host:port"
		err = nil //nolint
		hostName = u.Host
	}

	// Some users only use a proxy and have no DNS resolver, so it is safe to ignore the LookupIP error.
	addrList, _ := net.LookupIP(hostName)
	return checkByAllowBlockList(hostName, addrList)
}
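
// checkByAllowBlockList applies the configured allow and block host lists to a
// host name and its resolved IP addresses.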
func checkByAllowBlockList(hostName string, addrList []net.IP) error {
	var ipAllowed bool
	var ipBlocked bool
	for _, addr := range addrList {
		ipAllowed = ipAllowed || allowList.MatchIPAddr(addr)
		ipBlocked = ipBlocked || blockList.MatchIPAddr(addr)
	}
	var blockedError error
	if blockList.MatchHostName(hostName) || ipBlocked {
		blockedError = &models.ErrInvalidCloneAddr{Host: hostName, IsPermissionDenied: true}
	}
	// If we have an allow-list, check it before returning, to produce the more accurate error.
	if !allowList.IsEmpty() {
		if !allowList.MatchHostName(hostName) && !ipAllowed {
			return &models.ErrInvalidCloneAddr{Host: hostName, IsPermissionDenied: true}
		}
	}
	// Otherwise we always follow the block list.
	return blockedError
}
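
// A sketch of the resulting precedence (host names illustrative):
//
//	host matches blockList, allowList empty    -> blocked
//	host misses a non-empty allowList          -> blocked (allow-list error returned)
//	host matches both allowList and blockList  -> blocked (block result still returned)
//	host matches allowList only                -> allowed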

// MigrateRepository migrates a repository according to the given MigrateOptions
func MigrateRepository(ctx context.Context, doer *user_model.User, ownerName string, opts base.MigrateOptions, messenger base.Messenger) (*repo_model.Repository, error) {
	err := IsMigrateURLAllowed(opts.CloneAddr, doer)
	if err != nil {
		return nil, err
	}
	if opts.LFS && len(opts.LFSEndpoint) > 0 {
		err := IsMigrateURLAllowed(opts.LFSEndpoint, doer)
		if err != nil {
			return nil, err
		}
	}
	downloader, err := newDownloader(ctx, ownerName, opts)
	if err != nil {
		return nil, err
	}

	uploader := NewGiteaLocalUploader(ctx, doer, ownerName, opts.RepoName)
	uploader.gitServiceType = opts.GitServiceType

	if err := migrateRepository(doer, downloader, uploader, opts, messenger); err != nil {
		if err1 := uploader.Rollback(); err1 != nil {
			log.Error("rollback failed: %v", err1)
		}
		if err2 := admin_model.CreateRepositoryNotice(fmt.Sprintf("Migrate repository from %s failed: %v", opts.OriginalURL, err)); err2 != nil {
			log.Error("create repository notice failed: %v", err2)
		}
		return nil, err
	}
	return uploader.repo, nil
}
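
// A rough usage sketch; option fields beyond those referenced in this file are
// not shown, and the values are illustrative:
//
//	repo, err := MigrateRepository(ctx, doer, "owner", base.MigrateOptions{
//		CloneAddr:    "https://github.com/some/repo.git",
//		RepoName:     "repo",
//		Issues:       true,
//		PullRequests: true,
//	}, nil) // a nil messenger falls back to base.NilMessenger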

func newDownloader(ctx context.Context, ownerName string, opts base.MigrateOptions) (base.Downloader, error) {
	var (
		downloader base.Downloader
		err        error
	)

	for _, factory := range factories {
		if factory.GitServiceType() == opts.GitServiceType {
			downloader, err = factory.New(ctx, opts)
			if err != nil {
				return nil, err
			}
			break
		}
	}

	if downloader == nil {
		opts.Wiki = true
		opts.Milestones = false
		opts.Labels = false
		opts.Releases = false
		opts.Comments = false
		opts.Issues = false
		opts.PullRequests = false
		downloader = NewPlainGitDownloader(ownerName, opts.RepoName, opts.CloneAddr)
		log.Trace("Will migrate from git: %s", opts.OriginalURL)
	}

	if setting.Migrations.MaxAttempts > 1 {
		downloader = base.NewRetryDownloader(ctx, downloader, setting.Migrations.MaxAttempts, setting.Migrations.RetryBackoff)
	}
	return downloader, nil
}

// migrateRepository will download information and then upload it to the Uploader.
// This simple approach works well for small repositories; for a big repository,
// saving all the data to disk before uploading would be better.
func migrateRepository(doer *user_model.User, downloader base.Downloader, uploader base.Uploader, opts base.MigrateOptions, messenger base.Messenger) error {
	if messenger == nil {
		messenger = base.NilMessenger
	}

	repo, err := downloader.GetRepoInfo()
	if err != nil {
		if !base.IsErrNotSupported(err) {
			return err
		}
		log.Info("migrating repo info is not supported, ignored")
	}
	repo.IsPrivate = opts.Private
	repo.IsMirror = opts.Mirror
	if opts.Description != "" {
		repo.Description = opts.Description
	}
	if repo.CloneURL, err = downloader.FormatCloneURL(opts, repo.CloneURL); err != nil {
		return err
	}

	// If the downloader is not a RepositoryRestorer then we need to recheck the CloneURL
	if _, ok := downloader.(*RepositoryRestorer); !ok {
		// The clone URL may have been rewritten by the downloader, so we must recheck it
		if err := IsMigrateURLAllowed(repo.CloneURL, doer); err != nil {
			return err
		}

		// The original URL may have been rewritten as well, so recheck it too
		if repo.OriginalURL != "" {
			if err := IsMigrateURLAllowed(repo.OriginalURL, doer); err != nil {
				return err
			}
		}
	}

	log.Trace("migrating git data from %s", repo.CloneURL)
	messenger("repo.migrate.migrating_git")
	if err = uploader.CreateRepo(repo, opts); err != nil {
		return err
	}
	defer uploader.Close()

	log.Trace("migrating topics")
	messenger("repo.migrate.migrating_topics")
	topics, err := downloader.GetTopics()
	if err != nil {
		if !base.IsErrNotSupported(err) {
			return err
		}
		log.Warn("migrating topics is not supported, ignored")
	}
	if len(topics) != 0 {
		if err = uploader.CreateTopics(topics...); err != nil {
			return err
		}
	}
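
	// Milestones, labels, and releases are bulk-inserted in chunks capped by the
	// uploader's MaxBatchInsertSize for the corresponding item type, keeping
	// memory use and statement size bounded.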
	if opts.Milestones {
		log.Trace("migrating milestones")
		messenger("repo.migrate.migrating_milestones")
		milestones, err := downloader.GetMilestones()
		if err != nil {
			if !base.IsErrNotSupported(err) {
				return err
			}
			log.Warn("migrating milestones is not supported, ignored")
		}

		msBatchSize := uploader.MaxBatchInsertSize("milestone")
		for len(milestones) > 0 {
			if len(milestones) < msBatchSize {
				msBatchSize = len(milestones)
			}

			if err := uploader.CreateMilestones(milestones[:msBatchSize]...); err != nil {
				return err
			}
			milestones = milestones[msBatchSize:]
		}
	}

	if opts.Labels {
		log.Trace("migrating labels")
		messenger("repo.migrate.migrating_labels")
		labels, err := downloader.GetLabels()
		if err != nil {
			if !base.IsErrNotSupported(err) {
				return err
			}
			log.Warn("migrating labels is not supported, ignored")
		}

		lbBatchSize := uploader.MaxBatchInsertSize("label")
		for len(labels) > 0 {
			if len(labels) < lbBatchSize {
				lbBatchSize = len(labels)
			}

			if err := uploader.CreateLabels(labels[:lbBatchSize]...); err != nil {
				return err
			}
			labels = labels[lbBatchSize:]
		}
	}

	if opts.Releases {
		log.Trace("migrating releases")
		messenger("repo.migrate.migrating_releases")
		releases, err := downloader.GetReleases()
		if err != nil {
			if !base.IsErrNotSupported(err) {
				return err
			}
			log.Warn("migrating releases is not supported, ignored")
		}

		relBatchSize := uploader.MaxBatchInsertSize("release")
		for len(releases) > 0 {
			if len(releases) < relBatchSize {
				relBatchSize = len(releases)
			}

			if err = uploader.CreateReleases(releases[:relBatchSize]...); err != nil {
				return err
			}
			releases = releases[relBatchSize:]
		}

		// Once all releases (if any) are inserted, sync any remaining non-release tags
		if err = uploader.SyncTags(); err != nil {
			return err
		}
	}

	var (
		commentBatchSize = uploader.MaxBatchInsertSize("comment")
		reviewBatchSize  = uploader.MaxBatchInsertSize("review")
	)

	supportAllComments := downloader.SupportGetRepoComments()
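
	// Comments can be migrated in two ways: fetched per issue or pull request via
	// GetComments, or in one repository-wide pass via GetAllComments when the
	// downloader supports that (see the end of this function).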
	if opts.Issues {
		log.Trace("migrating issues and comments")
		messenger("repo.migrate.migrating_issues")
		issueBatchSize := uploader.MaxBatchInsertSize("issue")

		for i := 1; ; i++ {
			issues, isEnd, err := downloader.GetIssues(i, issueBatchSize)
			if err != nil {
				if !base.IsErrNotSupported(err) {
					return err
				}
				log.Warn("migrating issues is not supported, ignored")
				break
			}

			if err := uploader.CreateIssues(issues...); err != nil {
				return err
			}

			if opts.Comments && !supportAllComments {
				allComments := make([]*base.Comment, 0, commentBatchSize)
				for _, issue := range issues {
					log.Trace("migrating issue %d's comments", issue.Number)
					comments, _, err := downloader.GetComments(issue)
					if err != nil {
						if !base.IsErrNotSupported(err) {
							return err
						}
						log.Warn("migrating comments is not supported, ignored")
					}

					allComments = append(allComments, comments...)

					if len(allComments) >= commentBatchSize {
						if err = uploader.CreateComments(allComments[:commentBatchSize]...); err != nil {
							return err
						}

						allComments = allComments[commentBatchSize:]
					}
				}

				if len(allComments) > 0 {
					if err = uploader.CreateComments(allComments...); err != nil {
						return err
					}
				}
			}

			if isEnd {
				break
			}
		}
	}
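
	// Pull requests are paged the same way as issues; their comments reuse the
	// comment batch size, and reviews are flushed in batches of reviewBatchSize.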
	if opts.PullRequests {
		log.Trace("migrating pull requests and comments")
		messenger("repo.migrate.migrating_pulls")
		prBatchSize := uploader.MaxBatchInsertSize("pullrequest")
		for i := 1; ; i++ {
			prs, isEnd, err := downloader.GetPullRequests(i, prBatchSize)
			if err != nil {
				if !base.IsErrNotSupported(err) {
					return err
				}
				log.Warn("migrating pull requests is not supported, ignored")
				break
			}

			if err := uploader.CreatePullRequests(prs...); err != nil {
				return err
			}

			if opts.Comments {
				if !supportAllComments {
					// plain comments
					allComments := make([]*base.Comment, 0, commentBatchSize)
					for _, pr := range prs {
						log.Trace("migrating pull request %d's comments", pr.Number)
						comments, _, err := downloader.GetComments(pr)
						if err != nil {
							if !base.IsErrNotSupported(err) {
								return err
							}
							log.Warn("migrating comments is not supported, ignored")
						}

						allComments = append(allComments, comments...)

						if len(allComments) >= commentBatchSize {
							if err = uploader.CreateComments(allComments[:commentBatchSize]...); err != nil {
								return err
							}
							allComments = allComments[commentBatchSize:]
						}
					}
					if len(allComments) > 0 {
						if err = uploader.CreateComments(allComments...); err != nil {
							return err
						}
					}
				}

				// migrate reviews
				allReviews := make([]*base.Review, 0, reviewBatchSize)
				for _, pr := range prs {
					reviews, err := downloader.GetReviews(pr)
					if err != nil {
						if !base.IsErrNotSupported(err) {
							return err
						}
						log.Warn("migrating reviews is not supported, ignored")
						break
					}

					allReviews = append(allReviews, reviews...)

					if len(allReviews) >= reviewBatchSize {
						if err = uploader.CreateReviews(allReviews[:reviewBatchSize]...); err != nil {
							return err
						}
						allReviews = allReviews[reviewBatchSize:]
					}
				}
				if len(allReviews) > 0 {
					if err = uploader.CreateReviews(allReviews...); err != nil {
						return err
					}
				}
			}

			if isEnd {
				break
			}
		}
	}
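
	// When the downloader can page over every comment in the repository at once,
	// a single pass here replaces the per-issue and per-pull-request fetches above.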
	if opts.Comments && supportAllComments {
		log.Trace("migrating comments")
		for i := 1; ; i++ {
			comments, isEnd, err := downloader.GetAllComments(i, commentBatchSize)
			if err != nil {
				return err
			}

			if err := uploader.CreateComments(comments...); err != nil {
				return err
			}

			if isEnd {
				break
			}
		}
	}

	return uploader.Finish()
}
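
// Note that migrateRepository streams data page by page and flushes it through
// the uploader in bounded batches rather than loading the whole repository into
// memory at once; very large repositories may still be better served by a
// dump-to-disk approach, as the comment above the function suggests.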

// Init migrations service
func Init() error {
	// TODO: maybe we can deprecate the legacy ALLOWED_DOMAINS/ALLOW_LOCALNETWORKS/BLOCKED_DOMAINS settings and use ALLOWED_HOST_LIST/BLOCKED_HOST_LIST instead

	blockList = hostmatcher.ParseSimpleMatchList("migrations.BLOCKED_DOMAINS", setting.Migrations.BlockedDomains)

	allowList = hostmatcher.ParseSimpleMatchList("migrations.ALLOWED_DOMAINS/ALLOW_LOCALNETWORKS", setting.Migrations.AllowedDomains)
	if allowList.IsEmpty() {
		// the default policy is that the migration module can access external hosts
		allowList.AppendBuiltin(hostmatcher.MatchBuiltinExternal)
	}
	if setting.Migrations.AllowLocalNetworks {
		allowList.AppendBuiltin(hostmatcher.MatchBuiltinPrivate)
		allowList.AppendBuiltin(hostmatcher.MatchBuiltinLoopback)
	}
	// TODO: at the moment, if ALLOW_LOCALNETWORKS=false and ALLOWED_DOMAINS=domain.com where domain.com resolves to 127.0.0.1, the migration is still allowed.
	// If we want to block such a case, the private and loopback ranges should be added to the blockList when ALLOW_LOCALNETWORKS=false.

	if setting.Proxy.Enabled && setting.Proxy.ProxyURLFixed != nil {
		allowList.AppendPattern(setting.Proxy.ProxyURLFixed.Host)
	}

	return nil
}
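
// The lists above are built from the [migrations] section of app.ini; a minimal
// sketch of the relevant keys (values illustrative):
//
//	[migrations]
//	ALLOWED_DOMAINS = gitea.com, github.com
//	BLOCKED_DOMAINS = internal.example.com
//	ALLOW_LOCALNETWORKS = false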