// Copyright 2019 The Gitea Authors. All rights reserved.
// Copyright 2018 Jonas Franz. All rights reserved.
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file.

package migrations

import (
	"context"
	"fmt"
	"net"
	"net/url"
	"strings"

	"code.gitea.io/gitea/models"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/matchlist"
	"code.gitea.io/gitea/modules/migrations/base"
	"code.gitea.io/gitea/modules/setting"
)

// MigrateOptions is equal to base.MigrateOptions
type MigrateOptions = base.MigrateOptions

var (
	factories []base.DownloaderFactory

	allowList *matchlist.Matchlist
	blockList *matchlist.Matchlist
)

// RegisterDownloaderFactory registers a downloader factory
func RegisterDownloaderFactory(factory base.DownloaderFactory) {
	factories = append(factories, factory)
}
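
// isMigrateURLAllowed checks whether a remote clone address is allowed as a
// migration source, honouring the allowed/blocked domain lists and the
// local-network restriction from the migration settings.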
func isMigrateURLAllowed(remoteURL string) error {
	u, err := url.Parse(strings.ToLower(remoteURL))
	if err != nil {
		return err
	}

	if strings.EqualFold(u.Scheme, "http") || strings.EqualFold(u.Scheme, "https") {
		if len(setting.Migrations.AllowedDomains) > 0 {
			if !allowList.Match(u.Host) {
				return &models.ErrMigrationNotAllowed{Host: u.Host}
			}
		} else {
			if blockList.Match(u.Host) {
				return &models.ErrMigrationNotAllowed{Host: u.Host}
			}
		}
	}

	if !setting.Migrations.AllowLocalNetworks {
		addrList, err := net.LookupIP(strings.Split(u.Host, ":")[0])
		if err != nil {
			return &models.ErrMigrationNotAllowed{Host: u.Host, NotResolvedIP: true}
		}
		for _, addr := range addrList {
			if isIPPrivate(addr) || !addr.IsGlobalUnicast() {
				return &models.ErrMigrationNotAllowed{Host: u.Host, PrivateNet: addr.String()}
			}
		}
	}

	return nil
}

// MigrateRepository migrates a repository according to the MigrateOptions
func MigrateRepository(ctx context.Context, doer *models.User, ownerName string, opts base.MigrateOptions) (*models.Repository, error) {
	err := isMigrateURLAllowed(opts.CloneAddr)
	if err != nil {
		return nil, err
	}

	var (
		downloader base.Downloader
		uploader   = NewGiteaLocalUploader(ctx, doer, ownerName, opts.RepoName)
	)

	for _, factory := range factories {
		if factory.GitServiceType() == opts.GitServiceType {
			downloader, err = factory.New(ctx, opts)
			if err != nil {
				return nil, err
			}
			break
		}
	}
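
	// no registered downloader matched the service type: fall back to a plain
	// git clone and disable everything that would need a service API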
	if downloader == nil {
		opts.Wiki = true
		opts.Milestones = false
		opts.Labels = false
		opts.Releases = false
		opts.Comments = false
		opts.Issues = false
		opts.PullRequests = false
		downloader = NewPlainGitDownloader(ownerName, opts.RepoName, opts.CloneAddr)
		log.Trace("Will migrate from git: %s", opts.OriginalURL)
	}
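
	// record which service type the repository is migrated from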
	uploader.gitServiceType = opts.GitServiceType
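
	// wrap the downloader so that failed requests are retried up to MaxAttempts times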
	if setting.Migrations.MaxAttempts > 1 {
		downloader = base.NewRetryDownloader(ctx, downloader, setting.Migrations.MaxAttempts, setting.Migrations.RetryBackoff)
	}

	if err := migrateRepository(downloader, uploader, opts); err != nil {
		if err1 := uploader.Rollback(); err1 != nil {
			log.Error("rollback failed: %v", err1)
		}

		if err2 := models.CreateRepositoryNotice(fmt.Sprintf("Migrate repository from %s failed: %v", opts.OriginalURL, err)); err2 != nil {
			log.Error("create repository notice failed: %v", err2)
		}
		return nil, err
	}

	return uploader.repo, nil
}

// migrateRepository will download information and then upload it to the Uploader.
// This is a simple process suitable for a small repository; for a big repository
// it would be better to save all the data to disk before uploading.
func migrateRepository(downloader base.Downloader, uploader base.Uploader, opts base.MigrateOptions) error {
	repo, err := downloader.GetRepoInfo()
	if err != nil {
		return err
	}
	repo.IsPrivate = opts.Private
	repo.IsMirror = opts.Mirror
	if opts.Description != "" {
		repo.Description = opts.Description
	}
	log.Trace("migrating git data")
	if err := uploader.CreateRepo(repo, opts); err != nil {
		return err
	}
	defer uploader.Close()

	log.Trace("migrating topics")
	topics, err := downloader.GetTopics()
	if err != nil {
		return err
	}
	if len(topics) > 0 {
		if err := uploader.CreateTopics(topics...); err != nil {
			return err
		}
	}

	if opts.Milestones {
		log.Trace("migrating milestones")
		milestones, err := downloader.GetMilestones()
		if err != nil {
			return err
		}
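
		// insert milestones in batches no larger than the uploader's maximum batch size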
		msBatchSize := uploader.MaxBatchInsertSize("milestone")
		for len(milestones) > 0 {
			if len(milestones) < msBatchSize {
				msBatchSize = len(milestones)
			}

			if err := uploader.CreateMilestones(milestones[:msBatchSize]...); err != nil {
				return err
			}
			milestones = milestones[msBatchSize:]
		}
	}

	if opts.Labels {
		log.Trace("migrating labels")
		labels, err := downloader.GetLabels()
		if err != nil {
			return err
		}

		lbBatchSize := uploader.MaxBatchInsertSize("label")
		for len(labels) > 0 {
			if len(labels) < lbBatchSize {
				lbBatchSize = len(labels)
			}

			if err := uploader.CreateLabels(labels[:lbBatchSize]...); err != nil {
				return err
			}
			labels = labels[lbBatchSize:]
		}
	}

	if opts.Releases {
		log.Trace("migrating releases")
		releases, err := downloader.GetReleases()
		if err != nil {
			return err
		}

		relBatchSize := uploader.MaxBatchInsertSize("release")
		for len(releases) > 0 {
			if len(releases) < relBatchSize {
				relBatchSize = len(releases)
			}

			if err := uploader.CreateReleases(downloader, releases[:relBatchSize]...); err != nil {
				return err
			}
			releases = releases[relBatchSize:]
		}

		// Once all releases (if any) are inserted, sync any remaining non-release tags
		if err := uploader.SyncTags(); err != nil {
			return err
		}
	}
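
	// comments and reviews are inserted in batches shared by the issue and pull request migrations below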
	var (
		commentBatchSize = uploader.MaxBatchInsertSize("comment")
		reviewBatchSize  = uploader.MaxBatchInsertSize("review")
	)

	if opts.Issues {
		log.Trace("migrating issues and comments")
		var issueBatchSize = uploader.MaxBatchInsertSize("issue")
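
		// fetch issues page by page until the downloader reports the last page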
		for i := 1; ; i++ {
			issues, isEnd, err := downloader.GetIssues(i, issueBatchSize)
			if err != nil {
				return err
			}

			if err := uploader.CreateIssues(issues...); err != nil {
				return err
			}

			if !opts.Comments {
				if isEnd {
					break
				}
				continue
			}

			var allComments = make([]*base.Comment, 0, commentBatchSize)
			for _, issue := range issues {
				comments, err := downloader.GetComments(issue.Number)
				if err != nil {
					return err
				}

				allComments = append(allComments, comments...)

				if len(allComments) >= commentBatchSize {
					if err := uploader.CreateComments(allComments[:commentBatchSize]...); err != nil {
						return err
					}

					allComments = allComments[commentBatchSize:]
				}
			}

			if len(allComments) > 0 {
				if err := uploader.CreateComments(allComments...); err != nil {
					return err
				}
			}

			if isEnd {
				break
			}
		}
	}

	if opts.PullRequests {
		log.Trace("migrating pull requests and comments")
		var prBatchSize = uploader.MaxBatchInsertSize("pullrequest")
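
		// fetch pull requests page by page, like issues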
		for i := 1; ; i++ {
			prs, isEnd, err := downloader.GetPullRequests(i, prBatchSize)
			if err != nil {
				return err
			}

			if err := uploader.CreatePullRequests(prs...); err != nil {
				return err
			}

			if !opts.Comments {
				if isEnd {
					break
				}
				continue
			}

			// plain comments
			var allComments = make([]*base.Comment, 0, commentBatchSize)
			for _, pr := range prs {
				comments, err := downloader.GetComments(pr.Number)
				if err != nil {
					return err
				}

				allComments = append(allComments, comments...)

				if len(allComments) >= commentBatchSize {
					if err := uploader.CreateComments(allComments[:commentBatchSize]...); err != nil {
						return err
					}
					allComments = allComments[commentBatchSize:]
				}
			}
			if len(allComments) > 0 {
				if err := uploader.CreateComments(allComments...); err != nil {
					return err
				}
			}

			// migrate reviews
			var allReviews = make([]*base.Review, 0, reviewBatchSize)
			for _, pr := range prs {
				number := pr.Number

				// on GitLab migrations the pull request number can differ from the original number
				if pr.OriginalNumber > 0 {
					number = pr.OriginalNumber
				}

				reviews, err := downloader.GetReviews(number)
				if err != nil {
					return err
				}
				if pr.OriginalNumber > 0 {
					for i := range reviews {
						reviews[i].IssueIndex = pr.Number
					}
				}

				allReviews = append(allReviews, reviews...)

				if len(allReviews) >= reviewBatchSize {
					if err := uploader.CreateReviews(allReviews[:reviewBatchSize]...); err != nil {
						return err
					}
					allReviews = allReviews[reviewBatchSize:]
				}
			}
			if len(allReviews) > 0 {
				if err := uploader.CreateReviews(allReviews...); err != nil {
					return err
				}
			}

			if isEnd {
				break
			}
		}
	}

	return nil
}

// Init initializes the migrations service
func Init() error {
	var err error
	allowList, err = matchlist.NewMatchlist(setting.Migrations.AllowedDomains...)
	if err != nil {
		return fmt.Errorf("init migration allowList domains failed: %v", err)
	}

	blockList, err = matchlist.NewMatchlist(setting.Migrations.BlockedDomains...)
	if err != nil {
		return fmt.Errorf("init migration blockList domains failed: %v", err)
	}

	return nil
}

// isIPPrivate reports whether ip is a private address, according to
// RFC 1918 (IPv4 addresses) and RFC 4193 (IPv6 addresses).
// from https://github.com/golang/go/pull/42793
// TODO: remove if https://github.com/golang/go/issues/29146 is resolved
func isIPPrivate(ip net.IP) bool {
	if ip4 := ip.To4(); ip4 != nil {
		return ip4[0] == 10 ||
			(ip4[0] == 172 && ip4[1]&0xf0 == 16) ||
			(ip4[0] == 192 && ip4[1] == 168)
	}
	return len(ip) == net.IPv6len && ip[0]&0xfe == 0xfc
}