Add golangci (#6418)

parent 5832f8d90d
commit f9ec2f89f2
@@ -74,12 +74,10 @@ pipeline:
     commands:
       - make clean
       - make generate
-      - make vet
-      - make lint
-      - make fmt-check
+      - make golangci-lint
+      - make revive
       - make swagger-check
       - make swagger-validate
-      - make misspell-check
       - make test-vendor
       - make build
     when:
.golangci.yml (new file, 97 lines)
@@ -0,0 +1,97 @@
linters:
  enable:
    - gosimple
    - deadcode
    - typecheck
    - govet
    - errcheck
    - staticcheck
    - unused
    - structcheck
    - varcheck
    - golint
    - dupl
    #- gocyclo # The cyclomatic complexity of a lot of functions is too high, we should refactor those another time.
    - gofmt
    - misspell
    - gocritic
  enable-all: false
  disable-all: true
  fast: false

linters-settings:
  gocritic:
    disabled-checks:
      - ifElseChain
      - singleCaseSwitch # Every time this occurred in the code, there was no other way.

issues:
  exclude-rules:
    # Exclude some linters from running on tests files.
    - path: _test\.go
      linters:
        - gocyclo
        - errcheck
        - dupl
        - gosec
        - unparam
        - staticcheck
    - path: models/migrations/v
      linters:
        - gocyclo
        - errcheck
        - dupl
        - gosec
    - linters:
        - dupl
      text: "webhook"
    - linters:
        - gocritic
      text: "`ID' should not be capitalized"
    - path: modules/templates/helper.go
      linters:
        - gocritic
    - linters:
        - unused
        - deadcode
      text: "swagger"
    - path: contrib/pr/checkout.go
      linters:
        - errcheck
    - path: models/issue.go
      linters:
        - errcheck
    - path: models/migrations/
      linters:
        - errcheck
    - path: modules/log/
      linters:
        - errcheck
    - path: routers/routes/routes.go
      linters:
        - dupl
    - path: routers/repo/view.go
      linters:
        - dupl
    - path: models/migrations/
      linters:
        - unused
    - linters:
        - staticcheck
      text: "argument x is overwritten before first use"
    - path: modules/httplib/httplib.go
      linters:
        - staticcheck
    # Enabling this would require refactoring the methods and how they are called.
    - path: models/issue_comment_list.go
      linters:
        - dupl
    # "Destroy" is misspelled in github.com/go-macaron/session/session.go:213 so it's not our responsibility to fix it
    - path: modules/session/virtual.go
      linters:
        - misspell
      text: '`Destory` is a misspelling of `Destroy`'
    - path: modules/session/memory.go
      linters:
        - misspell
      text: '`Destory` is a misspelling of `Destroy`'
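Most of the Go changes further down in this commit exist to satisfy the errcheck and gosimple linters enabled above. As a hedged, illustrative sketch (not code from this commit), the two idioms used throughout the diff look like this: check an error where it can be handled, or discard it explicitly with `_ =` so errcheck stays quiet.

package main

import (
    "log"
    "os"
)

func write(path string, data []byte) error {
    f, err := os.Create(path)
    if err != nil {
        return err
    }
    if _, err := f.Write(data); err != nil {
        // The write error is the one we want to report, so the Close error
        // is explicitly discarded here.
        _ = f.Close()
        return err
    }
    // On the success path the Close error matters, so it is checked.
    if err := f.Close(); err != nil {
        return err
    }
    return nil
}

func main() {
    if err := write("example.txt", []byte("hello")); err != nil {
        log.Fatalf("write: %v", err)
    }
}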
Makefile (11 changes)
@@ -135,6 +135,10 @@ errcheck:
 .PHONY: lint
 lint:
+	@echo 'make lint is deprecated. Use "make revive" if you want to use the old lint tool, or "make golangci-lint" to run a complete code check.'
+
+.PHONY: revive
+revive:
 	@hash revive > /dev/null 2>&1; if [ $$? -ne 0 ]; then \
 		$(GO) get -u github.com/mgechev/revive; \
 	fi
@@ -461,3 +465,10 @@ generate-images:
 .PHONY: pr
 pr:
 	$(GO) run contrib/pr/checkout.go $(PR)
+
+.PHONY: golangci-lint
+golangci-lint:
+	@hash golangci-lint > /dev/null 2>&1; if [ $$? -ne 0 ]; then \
+		curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.16.0; \
+	fi
+	golangci-lint run
@@ -481,7 +481,7 @@ func runUpdateOauth(c *cli.Context) error {
     }

     // update custom URL mapping
-    var customURLMapping *oauth2.CustomURLMapping
+    var customURLMapping = &oauth2.CustomURLMapping{}

     if oAuth2Config.CustomURLMapping != nil {
         customURLMapping.TokenURL = oAuth2Config.CustomURLMapping.TokenURL
cmd/cert.go (21 changes)
@@ -170,17 +170,28 @@ func runCert(c *cli.Context) error {
     if err != nil {
         log.Fatalf("Failed to open cert.pem for writing: %v", err)
     }
-    pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
-    certOut.Close()
+    err = pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
+    if err != nil {
+        log.Fatalf("Failed to encode certificate: %v", err)
+    }
+    err = certOut.Close()
+    if err != nil {
+        log.Fatalf("Failed to write cert: %v", err)
+    }
     log.Println("Written cert.pem")

     keyOut, err := os.OpenFile("key.pem", os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
     if err != nil {
         log.Fatalf("Failed to open key.pem for writing: %v", err)
     }
-    pem.Encode(keyOut, pemBlockForKey(priv))
-    keyOut.Close()
+    err = pem.Encode(keyOut, pemBlockForKey(priv))
+    if err != nil {
+        log.Fatalf("Failed to encode key: %v", err)
+    }
+    err = keyOut.Close()
+    if err != nil {
+        log.Fatalf("Failed to write key: %v", err)
+    }
     log.Println("Written key.pem")

     return nil
 }
@@ -30,7 +30,6 @@ import (
 )

 const (
-    accessDenied = "Repository does not exist or you do not have access"
     lfsAuthenticateVerb = "git-lfs-authenticate"
 )

@@ -67,7 +66,7 @@ func checkLFSVersion() {
 }

 func setup(logPath string) {
-    log.DelLogger("console")
+    _ = log.DelLogger("console")
     setting.NewContext()
     checkLFSVersion()
 }
@@ -112,7 +111,9 @@ func runServ(c *cli.Context) error {
     }

     if len(c.Args()) < 1 {
-        cli.ShowSubcommandHelp(c)
+        if err := cli.ShowSubcommandHelp(c); err != nil {
+            fmt.Printf("error showing subcommand help: %v\n", err)
+        }
         return nil
     }

@@ -178,11 +178,16 @@ func runWeb(ctx *cli.Context) error {
         }
         err = runHTTPS(listenAddr, setting.CertFile, setting.KeyFile, context2.ClearHandler(m))
     case setting.FCGI:
-        listener, err := net.Listen("tcp", listenAddr)
+        var listener net.Listener
+        listener, err = net.Listen("tcp", listenAddr)
         if err != nil {
             log.Fatal("Failed to bind %s: %v", listenAddr, err)
         }
-        defer listener.Close()
+        defer func() {
+            if err := listener.Close(); err != nil {
+                log.Fatal("Failed to stop server: %v", err)
+            }
+        }()
         err = fcgi.Serve(listener, context2.ClearHandler(m))
     case setting.UnixSocket:
         if err := os.Remove(listenAddr); err != nil && !os.IsNotExist(err) {
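A hedged sketch of the deferred-close pattern introduced in the hunk above (illustrative only, not code from the commit): wrapping Close in a closure lets the error be inspected when the deferred call actually runs, instead of being silently dropped.

package main

import (
    "log"
    "net"
)

func serve() error {
    listener, err := net.Listen("tcp", "127.0.0.1:0")
    if err != nil {
        return err
    }
    // Defer a closure so the Close error can be checked rather than ignored.
    defer func() {
        if err := listener.Close(); err != nil {
            log.Printf("failed to stop server: %v", err)
        }
    }()
    // ... accept connections here ...
    return nil
}

func main() {
    if err := serve(); err != nil {
        log.Fatal(err)
    }
}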
@@ -91,8 +91,7 @@ func runPR() {
     routers.NewServices()
     //x, err = xorm.NewEngine("sqlite3", "file::memory:?cache=shared")

-    var helper testfixtures.Helper
-    helper = &testfixtures.SQLite{}
+    var helper testfixtures.Helper = &testfixtures.SQLite{}
     models.NewEngine(func(_ *xorm.Engine) error {
         return nil
     })
@@ -62,7 +62,7 @@ func branchAction(t *testing.T, button string) (*HTMLDoc, string) {
     req = NewRequestWithValues(t, "POST", link, map[string]string{
         "_csrf": getCsrf(t, htmlDoc.doc),
     })
-    resp = session.MakeRequest(t, req, http.StatusOK)
+    session.MakeRequest(t, req, http.StatusOK)

     url, err := url.Parse(link)
     assert.NoError(t, err)
@@ -34,7 +34,7 @@ func TestCreateFile(t *testing.T) {
         "content": "Content",
         "commit_choice": "direct",
     })
-    resp = session.MakeRequest(t, req, http.StatusFound)
+    session.MakeRequest(t, req, http.StatusFound)
     })
 }

@@ -48,7 +48,7 @@ func TestCreateFileOnProtectedBranch(t *testing.T) {
         "_csrf": csrf,
         "protected": "on",
     })
-    resp := session.MakeRequest(t, req, http.StatusFound)
+    session.MakeRequest(t, req, http.StatusFound)
     // Check if master branch has been locked successfully
     flashCookie := session.GetCookie("macaron_flash")
     assert.NotNil(t, flashCookie)
@@ -56,7 +56,7 @@ func TestCreateFileOnProtectedBranch(t *testing.T) {

     // Request editor page
     req = NewRequest(t, "GET", "/user2/repo1/_new/master/")
-    resp = session.MakeRequest(t, req, http.StatusOK)
+    resp := session.MakeRequest(t, req, http.StatusOK)

     doc := NewHTMLParser(t, resp.Body)
     lastCommit := doc.GetInputValueByName("last_commit")
@@ -42,7 +42,7 @@ type NilResponseRecorder struct {
 }

 func (n *NilResponseRecorder) Write(b []byte) (int, error) {
-    n.Length = n.Length + len(b)
+    n.Length += len(b)
     return len(b), nil
 }

@@ -141,8 +141,7 @@ func initIntegrationTest() {
     if err != nil {
         log.Fatalf("sql.Open: %v", err)
     }
-    rows, err := db.Query(fmt.Sprintf("SELECT 1 FROM pg_database WHERE datname = '%s'",
-        models.DbCfg.Name))
+    rows, err := db.Query(fmt.Sprintf("SELECT 1 FROM pg_database WHERE datname = '%s'", models.DbCfg.Name))
     if err != nil {
         log.Fatalf("db.Query: %v", err)
     }
@@ -210,7 +209,7 @@ func (s *TestSession) MakeRequest(t testing.TB, req *http.Request, expectedStatu
     resp := MakeRequest(t, req, expectedStatus)

     ch := http.Header{}
-    ch.Add("Cookie", strings.Join(resp.HeaderMap["Set-Cookie"], ";"))
+    ch.Add("Cookie", strings.Join(resp.Header()["Set-Cookie"], ";"))
     cr := http.Request{Header: ch}
     s.jar.SetCookies(baseURL, cr.Cookies())

@@ -226,7 +225,7 @@ func (s *TestSession) MakeRequestNilResponseRecorder(t testing.TB, req *http.Req
     resp := MakeRequestNilResponseRecorder(t, req, expectedStatus)

     ch := http.Header{}
-    ch.Add("Cookie", strings.Join(resp.HeaderMap["Set-Cookie"], ";"))
+    ch.Add("Cookie", strings.Join(resp.Header()["Set-Cookie"], ";"))
     cr := http.Request{Header: ch}
     s.jar.SetCookies(baseURL, cr.Cookies())

@@ -266,7 +265,7 @@ func loginUserWithPassword(t testing.TB, userName, password string) *TestSession
     resp = MakeRequest(t, req, http.StatusFound)

     ch := http.Header{}
-    ch.Add("Cookie", strings.Join(resp.HeaderMap["Set-Cookie"], ";"))
+    ch.Add("Cookie", strings.Join(resp.Header()["Set-Cookie"], ";"))
     cr := http.Request{Header: ch}

     session := emptyTestSession(t)
@@ -45,7 +45,7 @@ func storeObjectInRepo(t *testing.T, repositoryID int64, content *[]byte) string
         lfsMetaObject = &models.LFSMetaObject{Oid: oid, Size: int64(len(*content)), RepositoryID: repositoryID}
     }

-    lfsID = lfsID + 1
+    lfsID++
     lfsMetaObject, err = models.NewLFSMetaObject(lfsMetaObject)
     assert.NoError(t, err)
     contentStore := &lfs.ContentStore{BasePath: setting.LFS.ContentPath}
@@ -57,21 +57,6 @@ func initMigrationTest(t *testing.T) {
     setting.NewLogServices(true)
 }

-func getDialect() string {
-    dialect := "sqlite"
-    switch {
-    case setting.UseSQLite3:
-        dialect = "sqlite"
-    case setting.UseMySQL:
-        dialect = "mysql"
-    case setting.UsePostgreSQL:
-        dialect = "pgsql"
-    case setting.UseMSSQL:
-        dialect = "mssql"
-    }
-    return dialect
-}
-
 func availableVersions() ([]string, error) {
     migrationsDir, err := os.Open("integrations/migration-test")
     if err != nil {
@@ -73,7 +73,7 @@ func PrintCurrentTest(t testing.TB, skip ...int) {
     _, filename, line, _ := runtime.Caller(actualSkip)

     if log.CanColorStdout {
-        fmt.Fprintf(os.Stdout, "=== %s (%s:%d)\n", log.NewColoredValue(t.Name()), strings.TrimPrefix(filename, prefix), line)
+        fmt.Fprintf(os.Stdout, "=== %s (%s:%d)\n", fmt.Formatter(log.NewColoredValue(t.Name())), strings.TrimPrefix(filename, prefix), line)
     } else {
         fmt.Fprintf(os.Stdout, "=== %s (%s:%d)\n", t.Name(), strings.TrimPrefix(filename, prefix), line)
     }
main.go (6 changes)
@@ -42,7 +42,7 @@ var (

 func init() {
     setting.AppVer = Version
-    setting.AppBuiltWith = formatBuiltWith(Tags)
+    setting.AppBuiltWith = formatBuiltWith()

     // Grab the original help templates
     originalAppHelpTemplate = cli.AppHelpTemplate
@@ -56,7 +56,7 @@ func main() {
     app.Usage = "A painless self-hosted Git service"
     app.Description = `By default, gitea will start serving using the webserver with no
 arguments - which can alternatively be run by running the subcommand web.`
-    app.Version = Version + formatBuiltWith(Tags)
+    app.Version = Version + formatBuiltWith()
     app.Commands = []cli.Command{
         cmd.CmdWeb,
         cmd.CmdServ,
@@ -179,7 +179,7 @@ DEFAULT CONFIGURATION:
 `, originalTemplate, setting.CustomPath, overrided, setting.CustomConf, setting.AppPath, setting.AppWorkPath)
 }

-func formatBuiltWith(makeTags string) string {
+func formatBuiltWith() string {
     var version = runtime.Version()
     if len(MakeVersion) > 0 {
         version = MakeVersion + ", " + runtime.Version()
@@ -10,13 +10,6 @@ import (
     "github.com/stretchr/testify/assert"
 )

-var accessModes = []AccessMode{
-    AccessModeRead,
-    AccessModeWrite,
-    AccessModeAdmin,
-    AccessModeOwner,
-}
-
 func TestAccessLevel(t *testing.T) {
     assert.NoError(t, PrepareTestDatabase())

@@ -126,14 +126,14 @@ func (protectBranch *ProtectedBranch) GetGrantedApprovalsCount(pr *PullRequest)
 }

 // GetProtectedBranchByRepoID getting protected branch by repo ID
-func GetProtectedBranchByRepoID(RepoID int64) ([]*ProtectedBranch, error) {
+func GetProtectedBranchByRepoID(repoID int64) ([]*ProtectedBranch, error) {
     protectedBranches := make([]*ProtectedBranch, 0)
-    return protectedBranches, x.Where("repo_id = ?", RepoID).Desc("updated_unix").Find(&protectedBranches)
+    return protectedBranches, x.Where("repo_id = ?", repoID).Desc("updated_unix").Find(&protectedBranches)
 }

 // GetProtectedBranchBy getting protected branch by ID/Name
-func GetProtectedBranchBy(repoID int64, BranchName string) (*ProtectedBranch, error) {
-    rel := &ProtectedBranch{RepoID: repoID, BranchName: BranchName}
+func GetProtectedBranchBy(repoID int64, branchName string) (*ProtectedBranch, error) {
+    rel := &ProtectedBranch{RepoID: repoID, BranchName: branchName}
     has, err := x.Get(rel)
     if err != nil {
         return nil, err
@@ -40,7 +40,7 @@ func (r *BlameReader) NextPart() (*BlamePart, error) {
     scanner := r.scanner

     if r.lastSha != nil {
-        blamePart = &BlamePart{*r.lastSha, make([]string, 0, 0)}
+        blamePart = &BlamePart{*r.lastSha, make([]string, 0)}
     }

     for scanner.Scan() {
@@ -56,7 +56,7 @@ func (r *BlameReader) NextPart() (*BlamePart, error) {
     sha1 := lines[1]

     if blamePart == nil {
-        blamePart = &BlamePart{sha1, make([]string, 0, 0)}
+        blamePart = &BlamePart{sha1, make([]string, 0)}
     }

     if blamePart.Sha != sha1 {
@@ -384,13 +384,9 @@ func CutDiffAroundLine(originalDiff io.Reader, line int64, old bool, numbersOfLi
     // headers + hunk header
     newHunk := make([]string, headerLines)
     // transfer existing headers
-    for idx, lof := range hunk[:headerLines] {
-        newHunk[idx] = lof
-    }
+    copy(newHunk, hunk[:headerLines])
     // transfer last n lines
-    for _, lof := range hunk[len(hunk)-numbersOfLine-1:] {
-        newHunk = append(newHunk, lof)
-    }
+    newHunk = append(newHunk, hunk[len(hunk)-numbersOfLine-1:]...)
     // calculate newBegin, ... by counting lines
     for i := len(hunk) - 1; i >= len(hunk)-numbersOfLine; i-- {
         switch hunk[i][0] {
@@ -582,7 +578,10 @@ func ParsePatch(maxLines, maxLineCharacters, maxFiles int, reader io.Reader) (*D
         diff.Files = append(diff.Files, curFile)
         if len(diff.Files) >= maxFiles {
             diff.IsIncomplete = true
-            io.Copy(ioutil.Discard, reader)
+            _, err := io.Copy(ioutil.Discard, reader)
+            if err != nil {
+                return nil, fmt.Errorf("Copy: %v", err)
+            }
             break
         }
         curFileLinesCount = 0
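The two loop rewrites in the first hunk above are the kind of simplification gosimple suggests: copy for prefix transfer, variadic append for bulk append. A small hedged sketch of the equivalent idioms (illustrative only):

package main

import "fmt"

func main() {
    hunk := []string{"header1", "header2", "line1", "line2", "line3"}
    headerLines := 2

    // copy replaces an element-by-element loop over hunk[:headerLines]
    newHunk := make([]string, headerLines)
    copy(newHunk, hunk[:headerLines])

    // variadic append replaces a loop that appends one element at a time
    newHunk = append(newHunk, hunk[len(hunk)-2:]...)

    fmt.Println(newHunk) // [header1 header2 line2 line3]
}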
@@ -17,12 +17,6 @@ func assertEqual(t *testing.T, s1 string, s2 template.HTML) {
     }
 }

-func assertLineEqual(t *testing.T, d1 *DiffLine, d2 *DiffLine) {
-    if d1 != d2 {
-        t.Errorf("%v should be equal %v", d1, d2)
-    }
-}
-
 func TestDiffToHTML(t *testing.T) {
     assertEqual(t, "+foo <span class=\"added-code\">bar</span> biz", diffToHTML([]dmp.Diff{
         {Type: dmp.DiffEqual, Text: "foo "},
@@ -1330,7 +1330,7 @@ func sortIssuesSession(sess *xorm.Session, sortType string) {
     }
 }

-func (opts *IssuesOptions) setupSession(sess *xorm.Session) error {
+func (opts *IssuesOptions) setupSession(sess *xorm.Session) {
     if opts.Page >= 0 && opts.PageSize > 0 {
         var start int
         if opts.Page == 0 {
@@ -1389,7 +1389,6 @@ func (opts *IssuesOptions) setupSession(sess *xorm.Session) error {
             fmt.Sprintf("issue.id = il%[1]d.issue_id AND il%[1]d.label_id = %[2]d", i, labelID))
         }
     }
-    return nil
 }

 // CountIssuesByRepo map from repoID to number of issues matching the options
@@ -1397,9 +1396,7 @@ func CountIssuesByRepo(opts *IssuesOptions) (map[int64]int64, error) {
     sess := x.NewSession()
     defer sess.Close()

-    if err := opts.setupSession(sess); err != nil {
-        return nil, err
-    }
+    opts.setupSession(sess)

     countsSlice := make([]*struct {
         RepoID int64
@@ -1424,9 +1421,7 @@ func Issues(opts *IssuesOptions) ([]*Issue, error) {
     sess := x.NewSession()
     defer sess.Close()

-    if err := opts.setupSession(sess); err != nil {
-        return nil, err
-    }
+    opts.setupSession(sess)
     sortIssuesSession(sess, opts.SortType)

     issues := make([]*Issue, 0, setting.UI.IssuePagingNum)
@@ -171,17 +171,6 @@ func (c *Comment) loadPoster(e Engine) (err error) {
     return err
 }

-func (c *Comment) loadAttachments(e Engine) (err error) {
-    if len(c.Attachments) > 0 {
-        return
-    }
-    c.Attachments, err = getAttachmentsByCommentID(e, c.ID)
-    if err != nil {
-        log.Error("getAttachmentsByCommentID[%d]: %v", c.ID, err)
-    }
-    return err
-}
-
 // AfterDelete is invoked from XORM after the object is deleted.
 func (c *Comment) AfterDelete() {
     if c.ID <= 0 {
@@ -463,7 +452,7 @@ func (c *Comment) LoadReview() error {
     return c.loadReview(x)
 }

-func (c *Comment) checkInvalidation(e Engine, doer *User, repo *git.Repository, branch string) error {
+func (c *Comment) checkInvalidation(doer *User, repo *git.Repository, branch string) error {
     // FIXME differentiate between previous and proposed line
     commit, err := repo.LineBlame(branch, repo.Path, c.TreePath, uint(c.UnsignedLine()))
     if err != nil {
@@ -479,7 +468,7 @@ func (c *Comment) checkInvalidation(e Engine, doer *User, repo *git.Repository,
 // CheckInvalidation checks if the line of code comment got changed by another commit.
 // If the line got changed the comment is going to be invalidated.
 func (c *Comment) CheckInvalidation(repo *git.Repository, doer *User, branch string) error {
-    return c.checkInvalidation(x, doer, repo, branch)
+    return c.checkInvalidation(doer, repo, branch)
 }

 // DiffSide returns "previous" if Comment.Line is a LOC of the previous changes and "proposed" if it is a LOC of the proposed changes.
@@ -915,7 +904,7 @@ func CreateCodeComment(doer *User, repo *Repository, issue *Issue, content, tree
     commit, err := gitRepo.LineBlame(pr.GetGitRefName(), gitRepo.Path, treePath, uint(line))
     if err == nil {
         commitID = commit.ID.String()
-    } else if err != nil && !strings.Contains(err.Error(), "exit status 128 - fatal: no such path") {
+    } else if !strings.Contains(err.Error(), "exit status 128 - fatal: no such path") {
         return nil, fmt.Errorf("LineBlame[%s, %s, %s, %d]: %v", pr.GetGitRefName(), gitRepo.Path, treePath, line, err)
     }
     }
@@ -36,7 +36,7 @@ func (comments CommentList) loadPosters(e Engine) error {
     if err != nil {
         return err
     }
-    left = left - limit
+    left -= limit
     posterIDs = posterIDs[limit:]
     }

@@ -94,13 +94,13 @@ func (comments CommentList) loadLabels(e Engine) error {
     var label Label
     err = rows.Scan(&label)
     if err != nil {
-        rows.Close()
+        _ = rows.Close()
         return err
     }
     commentLabels[label.ID] = &label
     }
-    rows.Close()
-    left = left - limit
+    _ = rows.Close()
+    left -= limit
     labelIDs = labelIDs[limit:]
     }

@@ -143,7 +143,7 @@ func (comments CommentList) loadMilestones(e Engine) error {
     if err != nil {
         return err
     }
-    left = left - limit
+    left -= limit
     milestoneIDs = milestoneIDs[limit:]
     }

@@ -186,7 +186,7 @@ func (comments CommentList) loadOldMilestones(e Engine) error {
     if err != nil {
         return err
     }
-    left = left - limit
+    left -= limit
     milestoneIDs = milestoneIDs[limit:]
     }

@@ -236,9 +236,9 @@ func (comments CommentList) loadAssignees(e Engine) error {

     assignees[user.ID] = &user
     }
-    rows.Close()
+    _ = rows.Close()

-    left = left - limit
+    left -= limit
     assigneeIDs = assigneeIDs[limit:]
     }

@@ -310,9 +310,9 @@ func (comments CommentList) loadIssues(e Engine) error {

     issues[issue.ID] = &issue
     }
-    rows.Close()
+    _ = rows.Close()

-    left = left - limit
+    left -= limit
     issueIDs = issueIDs[limit:]
     }

@@ -361,15 +361,15 @@ func (comments CommentList) loadDependentIssues(e Engine) error {
     var issue Issue
     err = rows.Scan(&issue)
     if err != nil {
-        rows.Close()
+        _ = rows.Close()
         return err
     }

     issues[issue.ID] = &issue
     }
-    rows.Close()
+    _ = rows.Close()

-    left = left - limit
+    left -= limit
     issueIDs = issueIDs[limit:]
     }

@@ -406,14 +406,14 @@ func (comments CommentList) loadAttachments(e Engine) (err error) {
     var attachment Attachment
     err = rows.Scan(&attachment)
     if err != nil {
-        rows.Close()
+        _ = rows.Close()
         return err
     }
     attachments[attachment.CommentID] = append(attachments[attachment.CommentID], &attachment)
     }

-    rows.Close()
-    left = left - limit
+    _ = rows.Close()
+    left -= limit
     commentsIDs = commentsIDs[limit:]
     }

@@ -457,15 +457,15 @@ func (comments CommentList) loadReviews(e Engine) error {
     var review Review
     err = rows.Scan(&review)
     if err != nil {
-        rows.Close()
+        _ = rows.Close()
         return err
     }

     reviews[review.ID] = &review
     }
-    rows.Close()
+    _ = rows.Close()

-    left = left - limit
+    left -= limit
     reviewIDs = reviewIDs[limit:]
     }

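For context on the repeated `left -= limit` changes above: these loaders query IDs in fixed-size batches and shrink the remaining slice each pass. A hedged sketch of the surrounding loop shape (simplified; fetchBatch is a hypothetical stand-in for one database query, not a function from the repository):

package main

import "fmt"

// fetchBatch stands in for a single query over one slice of IDs.
func fetchBatch(ids []int64) {
    fmt.Println("loading", ids)
}

func loadInBatches(ids []int64, limit int) {
    left := len(ids)
    for left > 0 {
        if left < limit {
            limit = left
        }
        fetchBatch(ids[:limit])
        left -= limit // previously written as left = left - limit
        ids = ids[limit:]
    }
}

func main() {
    loadInBatches([]int64{1, 2, 3, 4, 5}, 2)
}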
@@ -401,14 +401,6 @@ func NewIssueLabels(issue *Issue, labels []*Label, doer *User) (err error) {
     return sess.Commit()
 }

-func getIssueLabels(e Engine, issueID int64) ([]*IssueLabel, error) {
-    issueLabels := make([]*IssueLabel, 0, 10)
-    return issueLabels, e.
-        Where("issue_id=?", issueID).
-        Asc("label_id").
-        Find(&issueLabels)
-}
-
 func deleteIssueLabel(e *xorm.Session, issue *Issue, label *Label, doer *User) (err error) {
     if count, err := e.Delete(&IssueLabel{
         IssueID: issue.ID,
@@ -7,6 +7,8 @@ package models
 import (
     "fmt"

+    "code.gitea.io/gitea/modules/log"
+
     "github.com/go-xorm/builder"
 )

@@ -47,7 +49,7 @@ func (issues IssueList) loadRepositories(e Engine) ([]*Repository, error) {
     if err != nil {
         return nil, fmt.Errorf("find repository: %v", err)
     }
-    left = left - limit
+    left -= limit
     repoIDs = repoIDs[limit:]
     }

@@ -91,7 +93,7 @@ func (issues IssueList) loadPosters(e Engine) error {
     if err != nil {
         return err
     }
-    left = left - limit
+    left -= limit
     posterIDs = posterIDs[limit:]
     }

@@ -146,13 +148,21 @@ func (issues IssueList) loadLabels(e Engine) error {
     var labelIssue LabelIssue
     err = rows.Scan(&labelIssue)
     if err != nil {
-        rows.Close()
+        // When there are no rows left and we try to close it, xorm will complain with an error.
+        // Since that is not relevant for us, we can safely ignore it.
+        if err := rows.Close(); err != nil {
+            log.Error("IssueList.loadLabels: Close: %v", err)
+        }
         return err
     }
     issueLabels[labelIssue.IssueLabel.IssueID] = append(issueLabels[labelIssue.IssueLabel.IssueID], labelIssue.Label)
     }
-    rows.Close()
-    left = left - limit
+    // When there are no rows left and we try to close it, xorm will complain with an error.
+    // Since that is not relevant for us, we can safely ignore it.
+    if err := rows.Close(); err != nil {
+        log.Error("IssueList.loadLabels: Close: %v", err)
+    }
+    left -= limit
     issueIDs = issueIDs[limit:]
     }

@@ -191,7 +201,7 @@ func (issues IssueList) loadMilestones(e Engine) error {
     if err != nil {
         return err
     }
-    left = left - limit
+    left -= limit
     milestoneIDs = milestoneIDs[limit:]
     }

@@ -231,15 +241,22 @@ func (issues IssueList) loadAssignees(e Engine) error {
     var assigneeIssue AssigneeIssue
     err = rows.Scan(&assigneeIssue)
     if err != nil {
-        rows.Close()
+        // When there are no rows left and we try to close it, xorm will complain with an error.
+        // Since that is not relevant for us, we can safely ignore it.
+        if err := rows.Close(); err != nil {
+            log.Error("IssueList.loadAssignees: Close: %v", err)
+        }
         return err
     }

     assignees[assigneeIssue.IssueAssignee.IssueID] = append(assignees[assigneeIssue.IssueAssignee.IssueID], assigneeIssue.Assignee)
     }
-    rows.Close()
-
-    left = left - limit
+    // When there are no rows left and we try to close it, xorm will complain with an error.
+    // Since that is not relevant for us, we can safely ignore it.
+    if err := rows.Close(); err != nil {
+        log.Error("IssueList.loadAssignees: Close: %v", err)
+    }
+    left -= limit
     issueIDs = issueIDs[limit:]
     }

@@ -283,14 +300,21 @@ func (issues IssueList) loadPullRequests(e Engine) error {
     var pr PullRequest
     err = rows.Scan(&pr)
     if err != nil {
-        rows.Close()
+        // When there are no rows left and we try to close it, xorm will complain with an error.
+        // Since that is not relevant for us, we can safely ignore it.
+        if err := rows.Close(); err != nil {
+            log.Error("IssueList.loadPullRequests: Close: %v", err)
+        }
         return err
     }
     pullRequestMaps[pr.IssueID] = &pr
     }
-
-    rows.Close()
-    left = left - limit
+    // When there are no rows left and we try to close it, xorm will complain with an error.
+    // Since that is not relevant for us, we can safely ignore it.
+    if err := rows.Close(); err != nil {
+        log.Error("IssueList.loadPullRequests: Close: %v", err)
+    }
+    left -= limit
     issuesIDs = issuesIDs[limit:]
     }

@@ -325,14 +349,21 @@ func (issues IssueList) loadAttachments(e Engine) (err error) {
     var attachment Attachment
     err = rows.Scan(&attachment)
     if err != nil {
-        rows.Close()
+        // When there are no rows left and we try to close it, xorm will complain with an error.
+        // Since that is not relevant for us, we can safely ignore it.
+        if err := rows.Close(); err != nil {
+            log.Error("IssueList.loadAttachments: Close: %v", err)
+        }
         return err
     }
     attachments[attachment.IssueID] = append(attachments[attachment.IssueID], &attachment)
     }
-
-    rows.Close()
-    left = left - limit
+    // When there are no rows left and we try to close it, xorm will complain with an error.
+    // Since that is not relevant for us, we can safely ignore it.
+    if err := rows.Close(); err != nil {
+        log.Error("IssueList.loadAttachments: Close: %v", err)
+    }
+    left -= limit
     issuesIDs = issuesIDs[limit:]
     }

@@ -368,13 +399,21 @@ func (issues IssueList) loadComments(e Engine, cond builder.Cond) (err error) {
     var comment Comment
     err = rows.Scan(&comment)
     if err != nil {
-        rows.Close()
+        // When there are no rows left and we try to close it, xorm will complain with an error.
+        // Since that is not relevant for us, we can safely ignore it.
+        if err := rows.Close(); err != nil {
+            log.Error("IssueList.loadComments: Close: %v", err)
+        }
         return err
     }
     comments[comment.IssueID] = append(comments[comment.IssueID], &comment)
     }
-    rows.Close()
-    left = left - limit
+    // When there are no rows left and we try to close it, xorm will complain with an error.
+    // Since that is not relevant for us, we can safely ignore it.
+    if err := rows.Close(); err != nil {
+        log.Error("IssueList.loadComments: Close: %v", err)
+    }
+    left -= limit
     issuesIDs = issuesIDs[limit:]
     }

@@ -422,13 +461,21 @@ func (issues IssueList) loadTotalTrackedTimes(e Engine) (err error) {
     var totalTime totalTimesByIssue
     err = rows.Scan(&totalTime)
     if err != nil {
-        rows.Close()
+        // When there are no rows left and we try to close it, xorm will complain with an error.
+        // Since that is not relevant for us, we can safely ignore it.
+        if err := rows.Close(); err != nil {
+            log.Error("IssueList.loadTotalTrackedTimes: Close: %v", err)
+        }
         return err
     }
     trackedTimes[totalTime.IssueID] = totalTime.Time
     }
-    rows.Close()
-    left = left - limit
+    // When there are no rows left and we try to close it, xorm will complain with an error.
+    // Since that is not relevant for us, we can safely ignore it.
+    if err := rows.Close(); err != nil {
+        log.Error("IssueList.loadTotalTrackedTimes: Close: %v", err)
+    }
+    left -= limit
     ids = ids[limit:]
     }

@@ -439,33 +486,33 @@ func (issues IssueList) loadTotalTrackedTimes(e Engine) (err error) {
 }

 // loadAttributes loads all attributes, except for attachments and comments
-func (issues IssueList) loadAttributes(e Engine) (err error) {
-    if _, err = issues.loadRepositories(e); err != nil {
-        return
+func (issues IssueList) loadAttributes(e Engine) error {
+    if _, err := issues.loadRepositories(e); err != nil {
+        return fmt.Errorf("issue.loadAttributes: loadRepositories: %v", err)
     }

-    if err = issues.loadPosters(e); err != nil {
-        return
+    if err := issues.loadPosters(e); err != nil {
+        return fmt.Errorf("issue.loadAttributes: loadPosters: %v", err)
     }

-    if err = issues.loadLabels(e); err != nil {
-        return
+    if err := issues.loadLabels(e); err != nil {
+        return fmt.Errorf("issue.loadAttributes: loadLabels: %v", err)
     }

-    if err = issues.loadMilestones(e); err != nil {
-        return
+    if err := issues.loadMilestones(e); err != nil {
+        return fmt.Errorf("issue.loadAttributes: loadMilestones: %v", err)
     }

-    if err = issues.loadAssignees(e); err != nil {
-        return
+    if err := issues.loadAssignees(e); err != nil {
+        return fmt.Errorf("issue.loadAttributes: loadAssignees: %v", err)
     }

-    if err = issues.loadPullRequests(e); err != nil {
-        return
+    if err := issues.loadPullRequests(e); err != nil {
+        return fmt.Errorf("issue.loadAttributes: loadPullRequests: %v", err)
     }

-    if err = issues.loadTotalTrackedTimes(e); err != nil {
-        return
+    if err := issues.loadTotalTrackedTimes(e); err != nil {
+        return fmt.Errorf("issue.loadAttributes: loadTotalTrackedTimes: %v", err)
     }

     return nil
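A hedged sketch of the row-scanning shape the issue_list.go changes converge on: on a scan failure the result set is closed and the Close error is only logged, because the scan error is the one worth returning (and, per the comments above, xorm reports a spurious error when an already-consumed result set is closed). Illustrative only; rowScanner and fakeRows are invented here so the sketch runs without a database.

package main

import (
    "fmt"
    "log"
)

// rowScanner is a minimal stand-in for the subset of a rows object used here.
type rowScanner interface {
    Next() bool
    Scan(dest ...interface{}) error
    Close() error
}

// scanIDs closes the rows on the error path and logs (rather than returns)
// the Close error, so it cannot mask the scan error.
func scanIDs(rows rowScanner) ([]int64, error) {
    var ids []int64
    for rows.Next() {
        var id int64
        if err := rows.Scan(&id); err != nil {
            if cerr := rows.Close(); cerr != nil {
                log.Printf("scanIDs: Close: %v", cerr)
            }
            return nil, err
        }
        ids = append(ids, id)
    }
    if err := rows.Close(); err != nil {
        log.Printf("scanIDs: Close: %v", err)
    }
    return ids, nil
}

// fakeRows lets the sketch run without a real database.
type fakeRows struct{ data []int64 }

func (f *fakeRows) Next() bool { return len(f.data) > 0 }
func (f *fakeRows) Scan(dest ...interface{}) error {
    *dest[0].(*int64) = f.data[0]
    f.data = f.data[1:]
    return nil
}
func (f *fakeRows) Close() error { return nil }

func main() {
    ids, err := scanIDs(&fakeRows{data: []int64{1, 2, 3}})
    fmt.Println(ids, err)
}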
@@ -15,7 +15,6 @@ import (
 // XORMLogBridge a logger bridge from Logger to xorm
 type XORMLogBridge struct {
     showSQL bool
-    level   core.LogLevel
     logger  *log.Logger
 }

@@ -34,42 +33,42 @@ func (l *XORMLogBridge) Log(skip int, level log.Level, format string, v ...inter

 // Debug show debug log
 func (l *XORMLogBridge) Debug(v ...interface{}) {
-    l.Log(2, log.DEBUG, fmt.Sprint(v...))
+    _ = l.Log(2, log.DEBUG, fmt.Sprint(v...))
 }

 // Debugf show debug log
 func (l *XORMLogBridge) Debugf(format string, v ...interface{}) {
-    l.Log(2, log.DEBUG, format, v...)
+    _ = l.Log(2, log.DEBUG, format, v...)
 }

 // Error show error log
 func (l *XORMLogBridge) Error(v ...interface{}) {
-    l.Log(2, log.ERROR, fmt.Sprint(v...))
+    _ = l.Log(2, log.ERROR, fmt.Sprint(v...))
 }

 // Errorf show error log
 func (l *XORMLogBridge) Errorf(format string, v ...interface{}) {
-    l.Log(2, log.ERROR, format, v...)
+    _ = l.Log(2, log.ERROR, format, v...)
 }

 // Info show information level log
 func (l *XORMLogBridge) Info(v ...interface{}) {
-    l.Log(2, log.INFO, fmt.Sprint(v...))
+    _ = l.Log(2, log.INFO, fmt.Sprint(v...))
 }

 // Infof show information level log
 func (l *XORMLogBridge) Infof(format string, v ...interface{}) {
-    l.Log(2, log.INFO, format, v...)
+    _ = l.Log(2, log.INFO, format, v...)
 }

 // Warn show warning log
 func (l *XORMLogBridge) Warn(v ...interface{}) {
-    l.Log(2, log.WARN, fmt.Sprint(v...))
+    _ = l.Log(2, log.WARN, fmt.Sprint(v...))
 }

 // Warnf show warning log
 func (l *XORMLogBridge) Warnf(format string, v ...interface{}) {
-    l.Log(2, log.WARN, format, v...)
+    _ = l.Log(2, log.WARN, format, v...)
 }

 // Level get logger level
@@ -164,8 +164,7 @@ func Cell2Int64(val xorm.Cell) int64 {

 // BeforeSet is invoked from XORM before setting the value of a field of this object.
 func (source *LoginSource) BeforeSet(colName string, val xorm.Cell) {
-    switch colName {
-    case "type":
+    if colName == "type" {
         switch LoginType(Cell2Int64(val)) {
         case LoginLDAP, LoginDLDAP:
             source.Cfg = new(LDAPConfig)
@@ -282,10 +281,12 @@ func CreateLoginSource(source *LoginSource) error {
     oAuth2Config := source.OAuth2()
     err = oauth2.RegisterProvider(source.Name, oAuth2Config.Provider, oAuth2Config.ClientID, oAuth2Config.ClientSecret, oAuth2Config.OpenIDConnectAutoDiscoveryURL, oAuth2Config.CustomURLMapping)
     err = wrapOpenIDConnectInitializeError(err, source.Name, oAuth2Config)

     if err != nil {
         // remove the LoginSource in case of errors while registering OAuth2 providers
-        x.Delete(source)
+        if _, err := x.Delete(source); err != nil {
+            log.Error("CreateLoginSource: Error while wrapOpenIDConnectInitializeError: %v", err)
+        }
+        return err
     }
     }
     return err
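A hedged sketch of the pattern in the two login-source hunks (this one and the next): when compensating cleanup after a failure itself fails, the cleanup error is logged and the original error is returned, so it does not mask the root cause. Illustrative only; register and remove are hypothetical stand-ins, not functions from the repository.

package main

import (
    "errors"
    "fmt"
    "log"
)

var errRegister = errors.New("register failed")

// register and remove stand in for registering a provider and deleting the
// half-created record again.
func register() error { return errRegister }
func remove() error   { return nil }

// createSource returns the registration error; a failure of the compensating
// remove() is only logged.
func createSource() error {
    if err := register(); err != nil {
        if rerr := remove(); rerr != nil {
            log.Printf("createSource: remove after failed register: %v", rerr)
        }
        return err
    }
    return nil
}

func main() {
    fmt.Println(createSource())
}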
@@ -325,10 +326,12 @@ func UpdateSource(source *LoginSource) error {
     oAuth2Config := source.OAuth2()
     err = oauth2.RegisterProvider(source.Name, oAuth2Config.Provider, oAuth2Config.ClientID, oAuth2Config.ClientSecret, oAuth2Config.OpenIDConnectAutoDiscoveryURL, oAuth2Config.CustomURLMapping)
     err = wrapOpenIDConnectInitializeError(err, source.Name, oAuth2Config)

     if err != nil {
         // restore original values since we cannot update the provider itself
-        x.ID(source.ID).AllCols().Update(originalLoginSource)
+        if _, err := x.ID(source.ID).AllCols().Update(originalLoginSource); err != nil {
+            log.Error("UpdateSource: Error while wrapOpenIDConnectInitializeError: %v", err)
+        }
+        return err
     }
     }
     return err
@@ -385,7 +388,7 @@ func composeFullName(firstname, surname, username string) string {
 }

 var (
-    alphaDashDotPattern = regexp.MustCompile("[^\\w-\\.]")
+    alphaDashDotPattern = regexp.MustCompile(`[^\w-\.]`)
 )

 // LoginViaLDAP queries if login/password is valid against the LDAP directory pool,
@@ -401,7 +404,7 @@ func LoginViaLDAP(user *User, login, password string, source *LoginSource, autoR

     if !autoRegister {
         if isAttributeSSHPublicKeySet && synchronizeLdapSSHPublicKeys(user, source, sr.SSHPublicKey) {
-            RewriteAllPublicKeys()
+            return user, RewriteAllPublicKeys()
         }

         return user, nil
@@ -435,7 +438,7 @@ func LoginViaLDAP(user *User, login, password string, source *LoginSource, autoR
     err := CreateUser(user)

     if err == nil && isAttributeSSHPublicKeySet && addLdapSSHPublicKeys(user, source, sr.SSHPublicKey) {
-        RewriteAllPublicKeys()
+        err = RewriteAllPublicKeys()
     }

     return user, err
@@ -157,10 +157,13 @@ func composeTplData(subject, body, link string) map[string]interface{} {

 func composeIssueCommentMessage(issue *Issue, doer *User, content string, comment *Comment, tplName base.TplName, tos []string, info string) *mailer.Message {
     subject := issue.mailSubject()
-    issue.LoadRepo()
+    err := issue.LoadRepo()
+    if err != nil {
+        log.Error("LoadRepo: %v", err)
+    }
     body := string(markup.RenderByType(markdown.MarkupName, []byte(content), issue.Repo.HTMLURL(), issue.Repo.ComposeMetas()))

-    data := make(map[string]interface{}, 10)
+    var data = make(map[string]interface{}, 10)
     if comment != nil {
         data = composeTplData(subject, body, issue.HTMLURL()+"#"+comment.HashTag())
     } else {
@@ -399,7 +399,7 @@ func trimCommitActionAppURLPrefix(x *xorm.Engine) error {
     return fmt.Errorf("marshal action content[%d]: %v", actID, err)
     }

-    if _, err = sess.Id(actID).Update(&Action{
+    if _, err = sess.ID(actID).Update(&Action{
         Content: string(p),
     }); err != nil {
         return fmt.Errorf("update action[%d]: %v", actID, err)
@@ -503,7 +503,7 @@ func attachmentRefactor(x *xorm.Engine) error {

     // Update database first because this is where error happens the most often.
     for _, attach := range attachments {
-        if _, err = sess.Id(attach.ID).Update(attach); err != nil {
+        if _, err = sess.ID(attach.ID).Update(attach); err != nil {
             return err
         }

@@ -581,7 +581,7 @@ func renamePullRequestFields(x *xorm.Engine) (err error) {
     if pull.Index == 0 {
         continue
     }
-    if _, err = sess.Id(pull.ID).Update(pull); err != nil {
+    if _, err = sess.ID(pull.ID).Update(pull); err != nil {
         return err
     }
     }
@@ -661,7 +661,7 @@ func generateOrgRandsAndSalt(x *xorm.Engine) (err error) {
     if org.Salt, err = generate.GetRandomString(10); err != nil {
         return err
     }
-    if _, err = sess.Id(org.ID).Update(org); err != nil {
+    if _, err = sess.ID(org.ID).Update(org); err != nil {
         return err
     }
     }
@@ -58,13 +58,13 @@ func convertIntervalToDuration(x *xorm.Engine) (err error) {
     return fmt.Errorf("Query repositories: %v", err)
     }
     for _, mirror := range mirrors {
-        mirror.Interval = mirror.Interval * time.Hour
+        mirror.Interval *= time.Hour
         if mirror.Interval < setting.Mirror.MinInterval {
             log.Info("Mirror interval less than Mirror.MinInterval, setting default interval: repo id %v", mirror.RepoID)
             mirror.Interval = setting.Mirror.DefaultInterval
         }
         log.Debug("Mirror interval set to %v for repo id %v", mirror.Interval, mirror.RepoID)
-        _, err := sess.Id(mirror.ID).Cols("interval").Update(mirror)
+        _, err := sess.ID(mirror.ID).Cols("interval").Update(mirror)
         if err != nil {
             return fmt.Errorf("update mirror interval failed: %v", err)
         }
@@ -48,6 +48,9 @@ func renameRepoIsBareToIsEmpty(x *xorm.Engine) error {

 		if len(indexes) >= 1 {
 			_, err = sess.Exec("DROP INDEX IDX_repository_is_bare ON repository")
+			if err != nil {
+				return fmt.Errorf("Drop index failed: %v", err)
+			}
 		}
 	} else {
 		_, err = sess.Exec("DROP INDEX IDX_repository_is_bare ON repository")
@@ -58,6 +58,9 @@ func hashAppToken(x *xorm.Engine) error {

 		if len(indexes) >= 1 {
 			_, err = sess.Exec("DROP INDEX UQE_access_token_sha1 ON access_token")
+			if err != nil {
+				return err
+			}
 		}
 	} else {
 		_, err = sess.Exec("DROP INDEX UQE_access_token_sha1 ON access_token")
@@ -48,6 +48,7 @@ type Engine interface {
 	Join(joinOperator string, tablename interface{}, condition string, args ...interface{}) *xorm.Session
 	SQL(interface{}, ...interface{}) *xorm.Session
 	Where(interface{}, ...interface{}) *xorm.Session
+	Asc(colNames ...string) *xorm.Session
 }

 var (
@@ -181,14 +182,14 @@ func parsePostgreSQLHostPort(info string) (string, string) {
 	return host, port
 }

-func getPostgreSQLConnectionString(DBHost, DBUser, DBPasswd, DBName, DBParam, DBSSLMode string) (connStr string) {
-	host, port := parsePostgreSQLHostPort(DBHost)
+func getPostgreSQLConnectionString(dbHost, dbUser, dbPasswd, dbName, dbParam, dbsslMode string) (connStr string) {
+	host, port := parsePostgreSQLHostPort(dbHost)
 	if host[0] == '/' { // looks like a unix socket
 		connStr = fmt.Sprintf("postgres://%s:%s@:%s/%s%ssslmode=%s&host=%s",
-			url.PathEscape(DBUser), url.PathEscape(DBPasswd), port, DBName, DBParam, DBSSLMode, host)
+			url.PathEscape(dbUser), url.PathEscape(dbPasswd), port, dbName, dbParam, dbsslMode, host)
 	} else {
 		connStr = fmt.Sprintf("postgres://%s:%s@%s:%s/%s%ssslmode=%s",
-			url.PathEscape(DBUser), url.PathEscape(DBPasswd), host, port, DBName, DBParam, DBSSLMode)
+			url.PathEscape(dbUser), url.PathEscape(dbPasswd), host, port, dbName, dbParam, dbsslMode)
 	}
 	return
 }
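Apart from renaming the parameters to Go's lower-case style for locals, the connection-string logic is untouched: a leading '/' in the host means a Unix socket, which PostgreSQL URLs express through a host= query parameter. Below is a rough standalone sketch of that branching, not copied from this file; the values in main are invented and the param argument is assumed to already carry its leading "?".

// Sketch only; not the Gitea implementation.
package main

import (
	"fmt"
	"net/url"
)

func postgresConnStr(host, port, user, passwd, name, param, sslMode string) string {
	if len(host) > 0 && host[0] == '/' { // unix socket: host goes into a query parameter
		return fmt.Sprintf("postgres://%s:%s@:%s/%s%ssslmode=%s&host=%s",
			url.PathEscape(user), url.PathEscape(passwd), port, name, param, sslMode, host)
	}
	return fmt.Sprintf("postgres://%s:%s@%s:%s/%s%ssslmode=%s",
		url.PathEscape(user), url.PathEscape(passwd), host, port, name, param, sslMode)
}

func main() {
	fmt.Println(postgresConnStr("/var/run/postgresql", "5432", "gitea", "secret", "gitea", "?", "disable"))
	fmt.Println(postgresConnStr("db.example.com", "5432", "gitea", "secret", "gitea", "?", "require"))
}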
@@ -119,7 +119,10 @@ func createOrUpdateIssueNotifications(e Engine, issue *Issue, notificationAuthor
 		}
 	}

-	issue.loadRepo(e)
+	err = issue.loadRepo(e)
+	if err != nil {
+		return err
+	}

 	for _, watch := range watches {
 		issue.Repo.Units = nil
@@ -106,7 +106,10 @@ func InitOAuth2() error {

 	for _, source := range loginSources {
 		oAuth2Config := source.OAuth2()
-		oauth2.RegisterProvider(source.Name, oAuth2Config.Provider, oAuth2Config.ClientID, oAuth2Config.ClientSecret, oAuth2Config.OpenIDConnectAutoDiscoveryURL, oAuth2Config.CustomURLMapping)
+		err := oauth2.RegisterProvider(source.Name, oAuth2Config.Provider, oAuth2Config.ClientID, oAuth2Config.ClientSecret, oAuth2Config.OpenIDConnectAutoDiscoveryURL, oAuth2Config.CustomURLMapping)
+		if err != nil {
+			return err
+		}
 	}
 	return nil
 }
@@ -142,6 +142,9 @@ func GetOAuth2ApplicationByID(id int64) (app *OAuth2Application, err error) {
 func getOAuth2ApplicationByID(e Engine, id int64) (app *OAuth2Application, err error) {
 	app = new(OAuth2Application)
 	has, err := e.ID(id).Get(app)
+	if err != nil {
+		return nil, err
+	}
 	if !has {
 		return nil, ErrOAuthApplicationNotFound{ID: id}
 	}
@@ -295,10 +298,10 @@ func (code *OAuth2AuthorizationCode) invalidate(e Engine) error {

 // ValidateCodeChallenge validates the given verifier against the saved code challenge. This is part of the PKCE implementation.
 func (code *OAuth2AuthorizationCode) ValidateCodeChallenge(verifier string) bool {
-	return code.validateCodeChallenge(x, verifier)
+	return code.validateCodeChallenge(verifier)
 }

-func (code *OAuth2AuthorizationCode) validateCodeChallenge(e Engine, verifier string) bool {
+func (code *OAuth2AuthorizationCode) validateCodeChallenge(verifier string) bool {
 	switch code.CodeChallengeMethod {
 	case "S256":
 		// base64url(SHA256(verifier)) see https://tools.ietf.org/html/rfc7636#section-4.6
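The only change here is dropping the unused Engine parameter; the S256 branch still compares the stored challenge against base64url(SHA256(verifier)) as the comment says. For reference, a self-contained sketch of that PKCE check follows. It is not copied from the Gitea source; the values in main are the RFC 7636 appendix B test vector.

// Sketch of the S256 PKCE comparison, not the Gitea implementation.
package main

import (
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

// validateS256 reports whether base64url(SHA256(verifier)) matches challenge,
// per RFC 7636 section 4.6 (unpadded base64url).
func validateS256(challenge, verifier string) bool {
	sum := sha256.Sum256([]byte(verifier))
	return challenge == base64.RawURLEncoding.EncodeToString(sum[:])
}

func main() {
	verifier := "dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXk"
	challenge := "E9Melhoa2OwvFrEMTJguCHaoeK1t8URWbuGJSstw-cM"
	fmt.Println(validateS256(challenge, verifier)) // expected: true
}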
@@ -172,7 +172,9 @@ func CreateOrganization(org, owner *User) (err error) {
 	}

 	if _, err = sess.Insert(&units); err != nil {
-		sess.Rollback()
+		if err := sess.Rollback(); err != nil {
+			log.Error("CreateOrganization: sess.Rollback: %v", err)
+		}
 		return err
 	}

@@ -376,10 +378,7 @@ func HasOrgVisible(org *User, user *User) bool {
 func hasOrgVisible(e Engine, org *User, user *User) bool {
 	// Not SignedUser
 	if user == nil {
-		if org.Visibility == structs.VisibleTypePublic {
-			return true
-		}
-		return false
+		return org.Visibility == structs.VisibleTypePublic
 	}

 	if user.IsAdmin {
@@ -485,10 +484,14 @@ func AddOrgUser(orgID, uid int64) error {
 	}

 	if _, err := sess.Insert(ou); err != nil {
-		sess.Rollback()
+		if err := sess.Rollback(); err != nil {
+			log.Error("AddOrgUser: sess.Rollback: %v", err)
+		}
 		return err
 	} else if _, err = sess.Exec("UPDATE `user` SET num_members = num_members + 1 WHERE id = ?", orgID); err != nil {
-		sess.Rollback()
+		if err := sess.Rollback(); err != nil {
+			log.Error("AddOrgUser: sess.Rollback: %v", err)
+		}
 		return err
 	}

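This file repeats the same errcheck fix in several places: sess.Rollback() returns an error, so the rollback is wrapped and a failure is logged before the original error is returned. One way to avoid repeating the block is a small helper like the sketch below; it is not part of this commit (which inlines the check at each call site) and assumes the package's existing xorm and log imports.

// rollbackAndLog is an illustrative helper, not code from this commit.
func rollbackAndLog(sess *xorm.Session, caller string) {
	if err := sess.Rollback(); err != nil {
		// The caller still returns the original error; the rollback
		// failure is only worth logging.
		log.Error("%s: sess.Rollback: %v", caller, err)
	}
}

// Usage at a call site such as AddOrgUser:
//	if _, err := sess.Insert(ou); err != nil {
//		rollbackAndLog(sess, "AddOrgUser")
//		return err
//	}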
@ -287,7 +287,8 @@ func NewTeam(t *Team) (err error) {
|
||||||
has, err := x.ID(t.OrgID).Get(new(User))
|
has, err := x.ID(t.OrgID).Get(new(User))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
} else if !has {
|
}
|
||||||
|
if !has {
|
||||||
return ErrOrgNotExist{t.OrgID, ""}
|
return ErrOrgNotExist{t.OrgID, ""}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -298,7 +299,8 @@ func NewTeam(t *Team) (err error) {
|
||||||
Get(new(Team))
|
Get(new(Team))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
} else if has {
|
}
|
||||||
|
if has {
|
||||||
return ErrTeamAlreadyExist{t.OrgID, t.LowerName}
|
return ErrTeamAlreadyExist{t.OrgID, t.LowerName}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -309,7 +311,10 @@ func NewTeam(t *Team) (err error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err = sess.Insert(t); err != nil {
|
if _, err = sess.Insert(t); err != nil {
|
||||||
sess.Rollback()
|
errRollback := sess.Rollback()
|
||||||
|
if errRollback != nil {
|
||||||
|
log.Error("NewTeam sess.Rollback: %v", errRollback)
|
||||||
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -319,14 +324,20 @@ func NewTeam(t *Team) (err error) {
|
||||||
unit.TeamID = t.ID
|
unit.TeamID = t.ID
|
||||||
}
|
}
|
||||||
if _, err = sess.Insert(&t.Units); err != nil {
|
if _, err = sess.Insert(&t.Units); err != nil {
|
||||||
sess.Rollback()
|
errRollback := sess.Rollback()
|
||||||
|
if errRollback != nil {
|
||||||
|
log.Error("NewTeam sess.Rollback: %v", errRollback)
|
||||||
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update organization number of teams.
|
// Update organization number of teams.
|
||||||
if _, err = sess.Exec("UPDATE `user` SET num_teams=num_teams+1 WHERE id = ?", t.OrgID); err != nil {
|
if _, err = sess.Exec("UPDATE `user` SET num_teams=num_teams+1 WHERE id = ?", t.OrgID); err != nil {
|
||||||
sess.Rollback()
|
errRollback := sess.Rollback()
|
||||||
|
if errRollback != nil {
|
||||||
|
log.Error("NewTeam sess.Rollback: %v", errRollback)
|
||||||
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return sess.Commit()
|
return sess.Commit()
|
||||||
|
@ -412,7 +423,10 @@ func UpdateTeam(t *Team, authChanged bool) (err error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err = sess.Insert(&t.Units); err != nil {
|
if _, err = sess.Insert(&t.Units); err != nil {
|
||||||
sess.Rollback()
|
errRollback := sess.Rollback()
|
||||||
|
if errRollback != nil {
|
||||||
|
log.Error("UpdateTeam sess.Rollback: %v", errRollback)
|
||||||
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -841,7 +855,10 @@ func UpdateTeamUnits(team *Team, units []TeamUnit) (err error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err = sess.Insert(units); err != nil {
|
if _, err = sess.Insert(units); err != nil {
|
||||||
sess.Rollback()
|
errRollback := sess.Rollback()
|
||||||
|
if errRollback != nil {
|
||||||
|
log.Error("UpdateTeamUnits sess.Rollback: %v", errRollback)
|
||||||
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -242,10 +242,10 @@ func TestGetOrgByName(t *testing.T) {
|
||||||
assert.EqualValues(t, 3, org.ID)
|
assert.EqualValues(t, 3, org.ID)
|
||||||
assert.Equal(t, "user3", org.Name)
|
assert.Equal(t, "user3", org.Name)
|
||||||
|
|
||||||
org, err = GetOrgByName("user2") // user2 is an individual
|
_, err = GetOrgByName("user2") // user2 is an individual
|
||||||
assert.True(t, IsErrOrgNotExist(err))
|
assert.True(t, IsErrOrgNotExist(err))
|
||||||
|
|
||||||
org, err = GetOrgByName("") // corner case
|
_, err = GetOrgByName("") // corner case
|
||||||
assert.True(t, IsErrOrgNotExist(err))
|
assert.True(t, IsErrOrgNotExist(err))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -499,7 +499,7 @@ func TestAccessibleReposEnv_CountRepos(t *testing.T) {
|
||||||
func TestAccessibleReposEnv_RepoIDs(t *testing.T) {
|
func TestAccessibleReposEnv_RepoIDs(t *testing.T) {
|
||||||
assert.NoError(t, PrepareTestDatabase())
|
assert.NoError(t, PrepareTestDatabase())
|
||||||
org := AssertExistsAndLoadBean(t, &User{ID: 3}).(*User)
|
org := AssertExistsAndLoadBean(t, &User{ID: 3}).(*User)
|
||||||
testSuccess := func(userID, page, pageSize int64, expectedRepoIDs []int64) {
|
testSuccess := func(userID, _, pageSize int64, expectedRepoIDs []int64) {
|
||||||
env, err := org.AccessibleReposEnv(userID)
|
env, err := org.AccessibleReposEnv(userID)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
repoIDs, err := env.RepoIDs(1, 100)
|
repoIDs, err := env.RepoIDs(1, 100)
|
||||||
|
|
|
@ -192,15 +192,19 @@ func (pr *PullRequest) apiFormat(e Engine) *api.PullRequest {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if baseBranch, err = pr.BaseRepo.GetBranch(pr.BaseBranch); err != nil {
|
if baseBranch, err = pr.BaseRepo.GetBranch(pr.BaseBranch); err != nil {
|
||||||
|
log.Error("pr.BaseRepo.GetBranch[%d]: %v", pr.BaseBranch, err)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
if baseCommit, err = baseBranch.GetCommit(); err != nil {
|
if baseCommit, err = baseBranch.GetCommit(); err != nil {
|
||||||
|
log.Error("baseBranch.GetCommit[%d]: %v", pr.ID, err)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
if headBranch, err = pr.HeadRepo.GetBranch(pr.HeadBranch); err != nil {
|
if headBranch, err = pr.HeadRepo.GetBranch(pr.HeadBranch); err != nil {
|
||||||
|
log.Error("pr.HeadRepo.GetBranch[%d]: %v", pr.HeadBranch, err)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
if headCommit, err = headBranch.GetCommit(); err != nil {
|
if headCommit, err = headBranch.GetCommit(); err != nil {
|
||||||
|
log.Error("headBranch.GetCommit[%d]: %v", pr.ID, err)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
apiBaseBranchInfo := &api.PRBranchInfo{
|
apiBaseBranchInfo := &api.PRBranchInfo{
|
||||||
|
@ -218,7 +222,10 @@ func (pr *PullRequest) apiFormat(e Engine) *api.PullRequest {
|
||||||
Repository: pr.HeadRepo.innerAPIFormat(e, AccessModeNone, false),
|
Repository: pr.HeadRepo.innerAPIFormat(e, AccessModeNone, false),
|
||||||
}
|
}
|
||||||
|
|
||||||
pr.Issue.loadRepo(e)
|
if err = pr.Issue.loadRepo(e); err != nil {
|
||||||
|
log.Error("pr.Issue.loadRepo[%d]: %v", pr.ID, err)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
apiPullRequest := &api.PullRequest{
|
apiPullRequest := &api.PullRequest{
|
||||||
ID: pr.ID,
|
ID: pr.ID,
|
||||||
|
@@ -420,7 +427,11 @@ func (pr *PullRequest) Merge(doer *User, baseGitRepo *git.Repository, mergeStyle
 	if err != nil {
 		return err
 	}
-	defer RemoveTemporaryPath(tmpBasePath)
+	defer func() {
+		if err := RemoveTemporaryPath(tmpBasePath); err != nil {
+			log.Error("Merge: RemoveTemporaryPath: %s", err)
+		}
+	}()

 	headRepoPath := RepoPath(pr.HeadUserName, pr.HeadRepo.Name)

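A bare defer RemoveTemporaryPath(tmpBasePath) discards the cleanup error, which errcheck flags, so the commit wraps the call in a closure and logs the failure; the same pattern recurs in UpdatePatch, PushToBaseRepo and the wiki code below. Here is the idiom in a self-contained form, using standard-library temporary directories instead of Gitea's helpers; it is a sketch, not project code.

// Sketch of logging errors from deferred cleanup.
package sketch

import (
	"io/ioutil"
	"log"
	"os"
)

// withTempDir runs work inside a fresh temporary directory and logs, rather
// than discards, any error from the deferred cleanup.
func withTempDir(work func(dir string) error) error {
	dir, err := ioutil.TempDir("", "example")
	if err != nil {
		return err
	}
	defer func() {
		if err := os.RemoveAll(dir); err != nil {
			log.Printf("withTempDir: RemoveAll: %v", err)
		}
	}()
	return work(dir)
}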
@ -1142,7 +1153,9 @@ func (pr *PullRequest) UpdatePatch() (err error) {
|
||||||
return fmt.Errorf("AddRemote: %v", err)
|
return fmt.Errorf("AddRemote: %v", err)
|
||||||
}
|
}
|
||||||
defer func() {
|
defer func() {
|
||||||
headGitRepo.RemoveRemote(tmpRemote)
|
if err := headGitRepo.RemoveRemote(tmpRemote); err != nil {
|
||||||
|
log.Error("UpdatePatch: RemoveRemote: %s", err)
|
||||||
|
}
|
||||||
}()
|
}()
|
||||||
pr.MergeBase, _, err = headGitRepo.GetMergeBase(tmpRemote, pr.BaseBranch, pr.HeadBranch)
|
pr.MergeBase, _, err = headGitRepo.GetMergeBase(tmpRemote, pr.BaseBranch, pr.HeadBranch)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -1180,7 +1193,11 @@ func (pr *PullRequest) PushToBaseRepo() (err error) {
|
||||||
return fmt.Errorf("headGitRepo.AddRemote: %v", err)
|
return fmt.Errorf("headGitRepo.AddRemote: %v", err)
|
||||||
}
|
}
|
||||||
// Make sure to remove the remote even if the push fails
|
// Make sure to remove the remote even if the push fails
|
||||||
defer headGitRepo.RemoveRemote(tmpRemoteName)
|
defer func() {
|
||||||
|
if err := headGitRepo.RemoveRemote(tmpRemoteName); err != nil {
|
||||||
|
log.Error("PushToBaseRepo: RemoveRemote: %s", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
headFile := pr.GetGitRefName()
|
headFile := pr.GetGitRefName()
|
||||||
|
|
||||||
|
|
|
@ -94,7 +94,7 @@ func TestGetUnmergedPullRequest(t *testing.T) {
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Equal(t, int64(2), pr.ID)
|
assert.Equal(t, int64(2), pr.ID)
|
||||||
|
|
||||||
pr, err = GetUnmergedPullRequest(1, 9223372036854775807, "branch1", "master")
|
_, err = GetUnmergedPullRequest(1, 9223372036854775807, "branch1", "master")
|
||||||
assert.Error(t, err)
|
assert.Error(t, err)
|
||||||
assert.True(t, IsErrPullRequestNotExist(err))
|
assert.True(t, IsErrPullRequestNotExist(err))
|
||||||
}
|
}
|
||||||
|
@ -128,7 +128,7 @@ func TestGetPullRequestByIndex(t *testing.T) {
|
||||||
assert.Equal(t, int64(1), pr.BaseRepoID)
|
assert.Equal(t, int64(1), pr.BaseRepoID)
|
||||||
assert.Equal(t, int64(2), pr.Index)
|
assert.Equal(t, int64(2), pr.Index)
|
||||||
|
|
||||||
pr, err = GetPullRequestByIndex(9223372036854775807, 9223372036854775807)
|
_, err = GetPullRequestByIndex(9223372036854775807, 9223372036854775807)
|
||||||
assert.Error(t, err)
|
assert.Error(t, err)
|
||||||
assert.True(t, IsErrPullRequestNotExist(err))
|
assert.True(t, IsErrPullRequestNotExist(err))
|
||||||
}
|
}
|
||||||
|
@ -151,7 +151,7 @@ func TestGetPullRequestByIssueID(t *testing.T) {
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
assert.Equal(t, int64(2), pr.IssueID)
|
assert.Equal(t, int64(2), pr.IssueID)
|
||||||
|
|
||||||
pr, err = GetPullRequestByIssueID(9223372036854775807)
|
_, err = GetPullRequestByIssueID(9223372036854775807)
|
||||||
assert.Error(t, err)
|
assert.Error(t, err)
|
||||||
assert.True(t, IsErrPullRequestNotExist(err))
|
assert.True(t, IsErrPullRequestNotExist(err))
|
||||||
}
|
}
|
||||||
|
|
|
@ -50,12 +50,12 @@ func (r *Release) loadAttributes(e Engine) error {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if r.Publisher == nil {
|
if r.Publisher == nil {
|
||||||
r.Publisher, err = GetUserByID(r.PublisherID)
|
r.Publisher, err = getUserByID(e, r.PublisherID)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return GetReleaseAttachments(r)
|
return getReleaseAttachments(e, r)
|
||||||
}
|
}
|
||||||
|
|
||||||
// LoadAttributes load repo and publisher attributes for a release
|
// LoadAttributes load repo and publisher attributes for a release
|
||||||
|
@ -316,6 +316,10 @@ func (s releaseMetaSearch) Less(i, j int) bool {
|
||||||
|
|
||||||
// GetReleaseAttachments retrieves the attachments for releases
|
// GetReleaseAttachments retrieves the attachments for releases
|
||||||
func GetReleaseAttachments(rels ...*Release) (err error) {
|
func GetReleaseAttachments(rels ...*Release) (err error) {
|
||||||
|
return getReleaseAttachments(x, rels...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func getReleaseAttachments(e Engine, rels ...*Release) (err error) {
|
||||||
if len(rels) == 0 {
|
if len(rels) == 0 {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
@ -335,11 +339,10 @@ func GetReleaseAttachments(rels ...*Release) (err error) {
|
||||||
sort.Sort(sortedRels)
|
sort.Sort(sortedRels)
|
||||||
|
|
||||||
// Select attachments
|
// Select attachments
|
||||||
err = x.
|
err = e.
|
||||||
Asc("release_id").
|
Asc("release_id").
|
||||||
In("release_id", sortedRels.ID).
|
In("release_id", sortedRels.ID).
|
||||||
Find(&attachments, Attachment{})
|
Find(&attachments, Attachment{})
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
@ -354,7 +357,6 @@ func GetReleaseAttachments(rels ...*Release) (err error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
return
|
return
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type releaseSorter struct {
|
type releaseSorter struct {
|
||||||
|
@ -493,7 +495,7 @@ func SyncReleasesWithTags(repo *Repository, gitRepo *git.Repository) error {
|
||||||
return fmt.Errorf("GetTagCommitID: %v", err)
|
return fmt.Errorf("GetTagCommitID: %v", err)
|
||||||
}
|
}
|
||||||
if git.IsErrNotExist(err) || commitID != rel.Sha1 {
|
if git.IsErrNotExist(err) || commitID != rel.Sha1 {
|
||||||
if err := pushUpdateDeleteTag(repo, gitRepo, rel.TagName); err != nil {
|
if err := pushUpdateDeleteTag(repo, rel.TagName); err != nil {
|
||||||
return fmt.Errorf("pushUpdateDeleteTag: %v", err)
|
return fmt.Errorf("pushUpdateDeleteTag: %v", err)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
|
|
@ -20,7 +20,6 @@ import (
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"regexp"
|
|
||||||
"sort"
|
"sort"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
@ -744,10 +743,6 @@ func (repo *Repository) getUsersWithAccessMode(e Engine, mode AccessMode) (_ []*
|
||||||
return users, nil
|
return users, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var (
|
|
||||||
descPattern = regexp.MustCompile(`https?://\S+`)
|
|
||||||
)
|
|
||||||
|
|
||||||
// DescriptionHTML does special handles to description and return HTML string.
|
// DescriptionHTML does special handles to description and return HTML string.
|
||||||
func (repo *Repository) DescriptionHTML() template.HTML {
|
func (repo *Repository) DescriptionHTML() template.HTML {
|
||||||
desc, err := markup.RenderDescriptionHTML([]byte(repo.Description), repo.HTMLURL(), repo.ComposeMetas())
|
desc, err := markup.RenderDescriptionHTML([]byte(repo.Description), repo.HTMLURL(), repo.ComposeMetas())
|
||||||
|
@ -1333,11 +1328,9 @@ func createRepository(e *xorm.Session, doer, u *User, repo *Repository) (err err
|
||||||
return fmt.Errorf("prepareWebhooks: %v", err)
|
return fmt.Errorf("prepareWebhooks: %v", err)
|
||||||
}
|
}
|
||||||
go HookQueue.Add(repo.ID)
|
go HookQueue.Add(repo.ID)
|
||||||
} else {
|
} else if err = repo.recalculateAccesses(e); err != nil {
|
||||||
// Organization automatically called this in addRepository method.
|
// Organization automatically called this in addRepository method.
|
||||||
if err = repo.recalculateAccesses(e); err != nil {
|
return fmt.Errorf("recalculateAccesses: %v", err)
|
||||||
return fmt.Errorf("recalculateAccesses: %v", err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if setting.Service.AutoWatchNewRepos {
|
if setting.Service.AutoWatchNewRepos {
|
||||||
|
@ -1512,11 +1505,9 @@ func TransferOwnership(doer *User, newOwnerName string, repo *Repository) error
|
||||||
} else if err = t.addRepository(sess, repo); err != nil {
|
} else if err = t.addRepository(sess, repo); err != nil {
|
||||||
return fmt.Errorf("add to owner team: %v", err)
|
return fmt.Errorf("add to owner team: %v", err)
|
||||||
}
|
}
|
||||||
} else {
|
} else if err = repo.recalculateAccesses(sess); err != nil {
|
||||||
// Organization called this in addRepository method.
|
// Organization called this in addRepository method.
|
||||||
if err = repo.recalculateAccesses(sess); err != nil {
|
return fmt.Errorf("recalculateAccesses: %v", err)
|
||||||
return fmt.Errorf("recalculateAccesses: %v", err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update repository count.
|
// Update repository count.
|
||||||
|
@ -1864,7 +1855,10 @@ func DeleteRepository(doer *User, uid, repoID int64) error {
|
||||||
repoPath := repo.repoPath(sess)
|
repoPath := repo.repoPath(sess)
|
||||||
removeAllWithNotice(sess, "Delete repository files", repoPath)
|
removeAllWithNotice(sess, "Delete repository files", repoPath)
|
||||||
|
|
||||||
repo.deleteWiki(sess)
|
err = repo.deleteWiki(sess)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
// Remove attachment files.
|
// Remove attachment files.
|
||||||
for i := range attachmentPaths {
|
for i := range attachmentPaths {
|
||||||
|
@ -2522,7 +2516,7 @@ func (repo *Repository) GetUserFork(userID int64) (*Repository, error) {
|
||||||
// CustomAvatarPath returns repository custom avatar file path.
|
// CustomAvatarPath returns repository custom avatar file path.
|
||||||
func (repo *Repository) CustomAvatarPath() string {
|
func (repo *Repository) CustomAvatarPath() string {
|
||||||
// Avatar empty by default
|
// Avatar empty by default
|
||||||
if len(repo.Avatar) <= 0 {
|
if len(repo.Avatar) == 0 {
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
return filepath.Join(setting.RepositoryAvatarUploadPath, repo.Avatar)
|
return filepath.Join(setting.RepositoryAvatarUploadPath, repo.Avatar)
|
||||||
|
@ -2562,10 +2556,7 @@ func (repo *Repository) generateRandomAvatar(e Engine) error {
|
||||||
|
|
||||||
// RemoveRandomAvatars removes the randomly generated avatars that were created for repositories
|
// RemoveRandomAvatars removes the randomly generated avatars that were created for repositories
|
||||||
func RemoveRandomAvatars() error {
|
func RemoveRandomAvatars() error {
|
||||||
var (
|
return x.
|
||||||
err error
|
|
||||||
)
|
|
||||||
err = x.
|
|
||||||
Where("id > 0").BufferSize(setting.IterateBufferSize).
|
Where("id > 0").BufferSize(setting.IterateBufferSize).
|
||||||
Iterate(new(Repository),
|
Iterate(new(Repository),
|
||||||
func(idx int, bean interface{}) error {
|
func(idx int, bean interface{}) error {
|
||||||
|
@ -2576,7 +2567,6 @@ func RemoveRandomAvatars() error {
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
return err
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// RelAvatarLink returns a relative link to the repository's avatar.
|
// RelAvatarLink returns a relative link to the repository's avatar.
|
||||||
|
@ -2587,7 +2577,7 @@ func (repo *Repository) RelAvatarLink() string {
|
||||||
func (repo *Repository) relAvatarLink(e Engine) string {
|
func (repo *Repository) relAvatarLink(e Engine) string {
|
||||||
// If no avatar - path is empty
|
// If no avatar - path is empty
|
||||||
avatarPath := repo.CustomAvatarPath()
|
avatarPath := repo.CustomAvatarPath()
|
||||||
if len(avatarPath) <= 0 || !com.IsFile(avatarPath) {
|
if len(avatarPath) == 0 || !com.IsFile(avatarPath) {
|
||||||
switch mode := setting.RepositoryAvatarFallback; mode {
|
switch mode := setting.RepositoryAvatarFallback; mode {
|
||||||
case "image":
|
case "image":
|
||||||
return setting.RepositoryAvatarFallbackImage
|
return setting.RepositoryAvatarFallbackImage
|
||||||
|
|
|
@ -114,7 +114,7 @@ func GetActivityStatsTopAuthors(repo *Repository, timeFrom time.Time, count int)
|
||||||
v = append(v, u)
|
v = append(v, u)
|
||||||
}
|
}
|
||||||
|
|
||||||
sort.Slice(v[:], func(i, j int) bool {
|
sort.Slice(v, func(i, j int) bool {
|
||||||
return v[i].Commits < v[j].Commits
|
return v[i].Commits < v[j].Commits
|
||||||
})
|
})
|
||||||
|
|
||||||
|
|
|
@ -75,7 +75,11 @@ func (repo *Repository) CreateNewBranch(doer *User, oldBranchName, branchName st
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer RemoveTemporaryPath(basePath)
|
defer func() {
|
||||||
|
if err := RemoveTemporaryPath(basePath); err != nil {
|
||||||
|
log.Error("CreateNewBranch: RemoveTemporaryPath: %s", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
if err := git.Clone(repo.RepoPath(), basePath, git.CloneRepoOptions{
|
if err := git.Clone(repo.RepoPath(), basePath, git.CloneRepoOptions{
|
||||||
Bare: true,
|
Bare: true,
|
||||||
|
@ -117,7 +121,11 @@ func (repo *Repository) CreateNewBranchFromCommit(doer *User, commit, branchName
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer RemoveTemporaryPath(basePath)
|
defer func() {
|
||||||
|
if err := RemoveTemporaryPath(basePath); err != nil {
|
||||||
|
log.Error("CreateNewBranchFromCommit: RemoveTemporaryPath: %s", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
if err := git.Clone(repo.RepoPath(), basePath, git.CloneRepoOptions{
|
if err := git.Clone(repo.RepoPath(), basePath, git.CloneRepoOptions{
|
||||||
Bare: true,
|
Bare: true,
|
||||||
|
|
|
@ -142,7 +142,7 @@ func (repo *Repository) ChangeCollaborationAccessMode(uid int64, mode AccessMode
|
||||||
}
|
}
|
||||||
|
|
||||||
if _, err = sess.
|
if _, err = sess.
|
||||||
Id(collaboration.ID).
|
ID(collaboration.ID).
|
||||||
Cols("mode").
|
Cols("mode").
|
||||||
Update(collaboration); err != nil {
|
Update(collaboration); err != nil {
|
||||||
return fmt.Errorf("update collaboration: %v", err)
|
return fmt.Errorf("update collaboration: %v", err)
|
||||||
|
|
|
@@ -148,19 +148,19 @@ func (s SearchOrderBy) String() string {
 // Strings for sorting result
 const (
 	SearchOrderByAlphabetically        SearchOrderBy = "name ASC"
-	SearchOrderByAlphabeticallyReverse               = "name DESC"
-	SearchOrderByLeastUpdated                        = "updated_unix ASC"
-	SearchOrderByRecentUpdated                       = "updated_unix DESC"
-	SearchOrderByOldest                              = "created_unix ASC"
-	SearchOrderByNewest                              = "created_unix DESC"
-	SearchOrderBySize                                = "size ASC"
-	SearchOrderBySizeReverse                         = "size DESC"
-	SearchOrderByID                                  = "id ASC"
-	SearchOrderByIDReverse                           = "id DESC"
-	SearchOrderByStars                               = "num_stars ASC"
-	SearchOrderByStarsReverse                        = "num_stars DESC"
-	SearchOrderByForks                               = "num_forks ASC"
-	SearchOrderByForksReverse                        = "num_forks DESC"
+	SearchOrderByAlphabeticallyReverse SearchOrderBy = "name DESC"
+	SearchOrderByLeastUpdated          SearchOrderBy = "updated_unix ASC"
+	SearchOrderByRecentUpdated         SearchOrderBy = "updated_unix DESC"
+	SearchOrderByOldest                SearchOrderBy = "created_unix ASC"
+	SearchOrderByNewest                SearchOrderBy = "created_unix DESC"
+	SearchOrderBySize                  SearchOrderBy = "size ASC"
+	SearchOrderBySizeReverse           SearchOrderBy = "size DESC"
+	SearchOrderByID                    SearchOrderBy = "id ASC"
+	SearchOrderByIDReverse             SearchOrderBy = "id DESC"
+	SearchOrderByStars                 SearchOrderBy = "num_stars ASC"
+	SearchOrderByStarsReverse          SearchOrderBy = "num_stars DESC"
+	SearchOrderByForks                 SearchOrderBy = "num_forks ASC"
+	SearchOrderByForksReverse          SearchOrderBy = "num_forks DESC"
 )

 // SearchRepositoryByName takes keyword and part of repository name to search,
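In the old block only the first constant was declared as SearchOrderBy; the ones written as Name = "..." are plain untyped string constants, which is why the commit spells the type on every line. A tiny demonstration of the difference, with invented names:

// Sketch showing why the explicit type matters in grouped constants.
package main

import "fmt"

type OrderBy string

const (
	ByNameAsc  OrderBy = "name ASC"
	ByNameDesc         = "name DESC" // untyped string constant, not OrderBy
)

func main() {
	var a = ByNameAsc
	var b = ByNameDesc
	// Prints "main.OrderBy string": only the explicitly typed constant
	// keeps the named type when assigned with type inference.
	fmt.Printf("%T %T\n", a, b)
}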
@ -4,7 +4,10 @@
|
||||||
|
|
||||||
package models
|
package models
|
||||||
|
|
||||||
import "strings"
|
import (
|
||||||
|
"code.gitea.io/gitea/modules/log"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
// RepoRedirect represents that a repo name should be redirected to another
|
// RepoRedirect represents that a repo name should be redirected to another
|
||||||
type RepoRedirect struct {
|
type RepoRedirect struct {
|
||||||
|
@ -38,7 +41,10 @@ func NewRepoRedirect(ownerID, repoID int64, oldRepoName, newRepoName string) err
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := deleteRepoRedirect(sess, ownerID, newRepoName); err != nil {
|
if err := deleteRepoRedirect(sess, ownerID, newRepoName); err != nil {
|
||||||
sess.Rollback()
|
errRollback := sess.Rollback()
|
||||||
|
if errRollback != nil {
|
||||||
|
log.Error("NewRepoRedirect sess.Rollback: %v", errRollback)
|
||||||
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -47,7 +53,10 @@ func NewRepoRedirect(ownerID, repoID int64, oldRepoName, newRepoName string) err
|
||||||
LowerName: oldRepoName,
|
LowerName: oldRepoName,
|
||||||
RedirectRepoID: repoID,
|
RedirectRepoID: repoID,
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
sess.Rollback()
|
errRollback := sess.Rollback()
|
||||||
|
if errRollback != nil {
|
||||||
|
log.Error("NewRepoRedirect sess.Rollback: %v", errRollback)
|
||||||
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
return sess.Commit()
|
return sess.Commit()
|
||||||
|
|
|
@ -142,7 +142,7 @@ func parseKeyString(content string) (string, error) {
|
||||||
if continuationLine || strings.ContainsAny(line, ":-") {
|
if continuationLine || strings.ContainsAny(line, ":-") {
|
||||||
continuationLine = strings.HasSuffix(line, "\\")
|
continuationLine = strings.HasSuffix(line, "\\")
|
||||||
} else {
|
} else {
|
||||||
keyContent = keyContent + line
|
keyContent += line
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -392,7 +392,7 @@ func addKey(e Engine, key *PublicKey) (err error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// AddPublicKey adds new public key to database and authorized_keys file.
|
// AddPublicKey adds new public key to database and authorized_keys file.
|
||||||
func AddPublicKey(ownerID int64, name, content string, LoginSourceID int64) (*PublicKey, error) {
|
func AddPublicKey(ownerID int64, name, content string, loginSourceID int64) (*PublicKey, error) {
|
||||||
log.Trace(content)
|
log.Trace(content)
|
||||||
|
|
||||||
fingerprint, err := calcFingerprint(content)
|
fingerprint, err := calcFingerprint(content)
|
||||||
|
@ -427,7 +427,7 @@ func AddPublicKey(ownerID int64, name, content string, LoginSourceID int64) (*Pu
|
||||||
Content: content,
|
Content: content,
|
||||||
Mode: AccessModeWrite,
|
Mode: AccessModeWrite,
|
||||||
Type: KeyTypeUser,
|
Type: KeyTypeUser,
|
||||||
LoginSourceID: LoginSourceID,
|
LoginSourceID: loginSourceID,
|
||||||
}
|
}
|
||||||
if err = addKey(sess, key); err != nil {
|
if err = addKey(sess, key); err != nil {
|
||||||
return nil, fmt.Errorf("addKey: %v", err)
|
return nil, fmt.Errorf("addKey: %v", err)
|
||||||
|
@ -491,10 +491,10 @@ func ListPublicKeys(uid int64) ([]*PublicKey, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// ListPublicLdapSSHKeys returns a list of synchronized public ldap ssh keys belongs to given user and login source.
|
// ListPublicLdapSSHKeys returns a list of synchronized public ldap ssh keys belongs to given user and login source.
|
||||||
func ListPublicLdapSSHKeys(uid int64, LoginSourceID int64) ([]*PublicKey, error) {
|
func ListPublicLdapSSHKeys(uid int64, loginSourceID int64) ([]*PublicKey, error) {
|
||||||
keys := make([]*PublicKey, 0, 5)
|
keys := make([]*PublicKey, 0, 5)
|
||||||
return keys, x.
|
return keys, x.
|
||||||
Where("owner_id = ? AND login_source_id = ?", uid, LoginSourceID).
|
Where("owner_id = ? AND login_source_id = ?", uid, loginSourceID).
|
||||||
Find(&keys)
|
Find(&keys)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -87,7 +87,7 @@ func (status *CommitStatus) loadRepo(e Engine) (err error) {
|
||||||
|
|
||||||
// APIURL returns the absolute APIURL to this commit-status.
|
// APIURL returns the absolute APIURL to this commit-status.
|
||||||
func (status *CommitStatus) APIURL() string {
|
func (status *CommitStatus) APIURL() string {
|
||||||
status.loadRepo(x)
|
_ = status.loadRepo(x)
|
||||||
return fmt.Sprintf("%sapi/v1/%s/statuses/%s",
|
return fmt.Sprintf("%sapi/v1/%s/statuses/%s",
|
||||||
setting.AppURL, status.Repo.FullName(), status.SHA)
|
setting.AppURL, status.Repo.FullName(), status.SHA)
|
||||||
}
|
}
|
||||||
|
@ -95,7 +95,7 @@ func (status *CommitStatus) APIURL() string {
|
||||||
// APIFormat assumes some fields assigned with values:
|
// APIFormat assumes some fields assigned with values:
|
||||||
// Required - Repo, Creator
|
// Required - Repo, Creator
|
||||||
func (status *CommitStatus) APIFormat() *api.Status {
|
func (status *CommitStatus) APIFormat() *api.Status {
|
||||||
status.loadRepo(x)
|
_ = status.loadRepo(x)
|
||||||
apiStatus := &api.Status{
|
apiStatus := &api.Status{
|
||||||
Created: status.CreatedUnix.AsTime(),
|
Created: status.CreatedUnix.AsTime(),
|
||||||
Updated: status.CreatedUnix.AsTime(),
|
Updated: status.CreatedUnix.AsTime(),
|
||||||
|
@ -219,7 +219,9 @@ func newCommitStatus(sess *xorm.Session, opts NewCommitStatusOptions) error {
|
||||||
}
|
}
|
||||||
has, err := sess.Desc("index").Limit(1).Get(lastCommitStatus)
|
has, err := sess.Desc("index").Limit(1).Get(lastCommitStatus)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
sess.Rollback()
|
if err := sess.Rollback(); err != nil {
|
||||||
|
log.Error("newCommitStatus: sess.Rollback: %v", err)
|
||||||
|
}
|
||||||
return fmt.Errorf("newCommitStatus[%s, %s]: %v", repoPath, opts.SHA, err)
|
return fmt.Errorf("newCommitStatus[%s, %s]: %v", repoPath, opts.SHA, err)
|
||||||
}
|
}
|
||||||
if has {
|
if has {
|
||||||
|
@ -231,7 +233,9 @@ func newCommitStatus(sess *xorm.Session, opts NewCommitStatusOptions) error {
|
||||||
|
|
||||||
// Insert new CommitStatus
|
// Insert new CommitStatus
|
||||||
if _, err = sess.Insert(opts.CommitStatus); err != nil {
|
if _, err = sess.Insert(opts.CommitStatus); err != nil {
|
||||||
sess.Rollback()
|
if err := sess.Rollback(); err != nil {
|
||||||
|
log.Error("newCommitStatus: sess.Rollback: %v", err)
|
||||||
|
}
|
||||||
return fmt.Errorf("newCommitStatus[%s, %s]: %v", repoPath, opts.SHA, err)
|
return fmt.Errorf("newCommitStatus[%s, %s]: %v", repoPath, opts.SHA, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -36,11 +36,11 @@ func TestGetAccessTokenBySHA(t *testing.T) {
|
||||||
assert.Equal(t, "2b3668e11cb82d3af8c6e4524fc7841297668f5008d1626f0ad3417e9fa39af84c268248b78c481daa7e5dc437784003494f", token.TokenHash)
|
assert.Equal(t, "2b3668e11cb82d3af8c6e4524fc7841297668f5008d1626f0ad3417e9fa39af84c268248b78c481daa7e5dc437784003494f", token.TokenHash)
|
||||||
assert.Equal(t, "e4efbf36", token.TokenLastEight)
|
assert.Equal(t, "e4efbf36", token.TokenLastEight)
|
||||||
|
|
||||||
token, err = GetAccessTokenBySHA("notahash")
|
_, err = GetAccessTokenBySHA("notahash")
|
||||||
assert.Error(t, err)
|
assert.Error(t, err)
|
||||||
assert.True(t, IsErrAccessTokenNotExist(err))
|
assert.True(t, IsErrAccessTokenNotExist(err))
|
||||||
|
|
||||||
token, err = GetAccessTokenBySHA("")
|
_, err = GetAccessTokenBySHA("")
|
||||||
assert.Error(t, err)
|
assert.Error(t, err)
|
||||||
assert.True(t, IsErrAccessTokenEmpty(err))
|
assert.True(t, IsErrAccessTokenEmpty(err))
|
||||||
}
|
}
|
||||||
|
|
|
@ -84,7 +84,7 @@ func PushUpdate(branch string, opt PushUpdateOptions) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func pushUpdateDeleteTag(repo *Repository, gitRepo *git.Repository, tagName string) error {
|
func pushUpdateDeleteTag(repo *Repository, tagName string) error {
|
||||||
rel, err := GetRelease(repo.ID, tagName)
|
rel, err := GetRelease(repo.ID, tagName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if IsErrReleaseNotExist(err) {
|
if IsErrReleaseNotExist(err) {
|
||||||
|
@ -223,7 +223,7 @@ func pushUpdate(opts PushUpdateOptions) (repo *Repository, err error) {
|
||||||
// If is tag reference
|
// If is tag reference
|
||||||
tagName := opts.RefFullName[len(git.TagPrefix):]
|
tagName := opts.RefFullName[len(git.TagPrefix):]
|
||||||
if isDelRef {
|
if isDelRef {
|
||||||
err = pushUpdateDeleteTag(repo, gitRepo, tagName)
|
err = pushUpdateDeleteTag(repo, tagName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("pushUpdateDeleteTag: %v", err)
|
return nil, fmt.Errorf("pushUpdateDeleteTag: %v", err)
|
||||||
}
|
}
|
||||||
|
|
|
@ -1072,7 +1072,10 @@ func deleteUser(e *xorm.Session, u *User) error {
|
||||||
if _, err = e.Delete(&PublicKey{OwnerID: u.ID}); err != nil {
|
if _, err = e.Delete(&PublicKey{OwnerID: u.ID}); err != nil {
|
||||||
return fmt.Errorf("deletePublicKeys: %v", err)
|
return fmt.Errorf("deletePublicKeys: %v", err)
|
||||||
}
|
}
|
||||||
rewriteAllPublicKeys(e)
|
err = rewriteAllPublicKeys(e)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
// ***** END: PublicKey *****
|
// ***** END: PublicKey *****
|
||||||
|
|
||||||
// ***** START: GPGPublicKey *****
|
// ***** START: GPGPublicKey *****
|
||||||
|
@ -1401,8 +1404,7 @@ func (opts *SearchUserOptions) toConds() builder.Cond {
|
||||||
} else {
|
} else {
|
||||||
exprCond = builder.Expr("org_user.org_id = \"user\".id")
|
exprCond = builder.Expr("org_user.org_id = \"user\".id")
|
||||||
}
|
}
|
||||||
var accessCond = builder.NewCond()
|
accessCond := builder.Or(
|
||||||
accessCond = builder.Or(
|
|
||||||
builder.In("id", builder.Select("org_id").From("org_user").LeftJoin("`user`", exprCond).Where(builder.And(builder.Eq{"uid": opts.OwnerID}, builder.Eq{"visibility": structs.VisibleTypePrivate}))),
|
builder.In("id", builder.Select("org_id").From("org_user").LeftJoin("`user`", exprCond).Where(builder.And(builder.Eq{"uid": opts.OwnerID}, builder.Eq{"visibility": structs.VisibleTypePrivate}))),
|
||||||
builder.In("visibility", structs.VisibleTypePublic, structs.VisibleTypeLimited))
|
builder.In("visibility", structs.VisibleTypePublic, structs.VisibleTypeLimited))
|
||||||
cond = cond.And(accessCond)
|
cond = cond.And(accessCond)
|
||||||
|
@ -1512,9 +1514,9 @@ func deleteKeysMarkedForDeletion(keys []string) (bool, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// addLdapSSHPublicKeys add a users public keys. Returns true if there are changes.
|
// addLdapSSHPublicKeys add a users public keys. Returns true if there are changes.
|
||||||
func addLdapSSHPublicKeys(usr *User, s *LoginSource, SSHPublicKeys []string) bool {
|
func addLdapSSHPublicKeys(usr *User, s *LoginSource, sshPublicKeys []string) bool {
|
||||||
var sshKeysNeedUpdate bool
|
var sshKeysNeedUpdate bool
|
||||||
for _, sshKey := range SSHPublicKeys {
|
for _, sshKey := range sshPublicKeys {
|
||||||
_, _, _, _, err := ssh.ParseAuthorizedKey([]byte(sshKey))
|
_, _, _, _, err := ssh.ParseAuthorizedKey([]byte(sshKey))
|
||||||
if err == nil {
|
if err == nil {
|
||||||
sshKeyName := fmt.Sprintf("%s-%s", s.Name, sshKey[0:40])
|
sshKeyName := fmt.Sprintf("%s-%s", s.Name, sshKey[0:40])
|
||||||
|
@ -1536,7 +1538,7 @@ func addLdapSSHPublicKeys(usr *User, s *LoginSource, SSHPublicKeys []string) boo
|
||||||
}
|
}
|
||||||
|
|
||||||
// synchronizeLdapSSHPublicKeys updates a users public keys. Returns true if there are changes.
|
// synchronizeLdapSSHPublicKeys updates a users public keys. Returns true if there are changes.
|
||||||
func synchronizeLdapSSHPublicKeys(usr *User, s *LoginSource, SSHPublicKeys []string) bool {
|
func synchronizeLdapSSHPublicKeys(usr *User, s *LoginSource, sshPublicKeys []string) bool {
|
||||||
var sshKeysNeedUpdate bool
|
var sshKeysNeedUpdate bool
|
||||||
|
|
||||||
log.Trace("synchronizeLdapSSHPublicKeys[%s]: Handling LDAP Public SSH Key synchronization for user %s", s.Name, usr.Name)
|
log.Trace("synchronizeLdapSSHPublicKeys[%s]: Handling LDAP Public SSH Key synchronization for user %s", s.Name, usr.Name)
|
||||||
|
@ -1554,7 +1556,7 @@ func synchronizeLdapSSHPublicKeys(usr *User, s *LoginSource, SSHPublicKeys []str
|
||||||
|
|
||||||
// Get Public Keys from LDAP and skip duplicate keys
|
// Get Public Keys from LDAP and skip duplicate keys
|
||||||
var ldapKeys []string
|
var ldapKeys []string
|
||||||
for _, v := range SSHPublicKeys {
|
for _, v := range sshPublicKeys {
|
||||||
sshKeySplit := strings.Split(v, " ")
|
sshKeySplit := strings.Split(v, " ")
|
||||||
if len(sshKeySplit) > 1 {
|
if len(sshKeySplit) > 1 {
|
||||||
ldapKey := strings.Join(sshKeySplit[:2], " ")
|
ldapKey := strings.Join(sshKeySplit[:2], " ")
|
||||||
|
@ -1634,9 +1636,13 @@ func SyncExternalUsers() {
|
||||||
|
|
||||||
// Find all users with this login type
|
// Find all users with this login type
|
||||||
var users []*User
|
var users []*User
|
||||||
x.Where("login_type = ?", LoginLDAP).
|
err = x.Where("login_type = ?", LoginLDAP).
|
||||||
And("login_source = ?", s.ID).
|
And("login_source = ?", s.ID).
|
||||||
Find(&users)
|
Find(&users)
|
||||||
|
if err != nil {
|
||||||
|
log.Error("SyncExternalUsers: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
sr := s.LDAP().SearchEntries()
|
sr := s.LDAP().SearchEntries()
|
||||||
for _, su := range sr {
|
for _, su := range sr {
|
||||||
|
@ -1694,7 +1700,7 @@ func SyncExternalUsers() {
|
||||||
|
|
||||||
// Check if user data has changed
|
// Check if user data has changed
|
||||||
if (len(s.LDAP().AdminFilter) > 0 && usr.IsAdmin != su.IsAdmin) ||
|
if (len(s.LDAP().AdminFilter) > 0 && usr.IsAdmin != su.IsAdmin) ||
|
||||||
strings.ToLower(usr.Email) != strings.ToLower(su.Mail) ||
|
!strings.EqualFold(usr.Email, su.Mail) ||
|
||||||
usr.FullName != fullName ||
|
usr.FullName != fullName ||
|
||||||
!usr.IsActive {
|
!usr.IsActive {
|
||||||
|
|
||||||
|
@ -1718,7 +1724,10 @@ func SyncExternalUsers() {
|
||||||
|
|
||||||
// Rewrite authorized_keys file if LDAP Public SSH Key attribute is set and any key was added or removed
|
// Rewrite authorized_keys file if LDAP Public SSH Key attribute is set and any key was added or removed
|
||||||
if sshKeysNeedUpdate {
|
if sshKeysNeedUpdate {
|
||||||
RewriteAllPublicKeys()
|
err = RewriteAllPublicKeys()
|
||||||
|
if err != nil {
|
||||||
|
log.Error("RewriteAllPublicKeys: %v", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Deactivate users not present in LDAP
|
// Deactivate users not present in LDAP
|
||||||
|
|
|
@ -134,7 +134,7 @@ func (email *EmailAddress) Activate() error {
|
||||||
|
|
||||||
email.IsActivated = true
|
email.IsActivated = true
|
||||||
if _, err := sess.
|
if _, err := sess.
|
||||||
Id(email.ID).
|
ID(email.ID).
|
||||||
Cols("is_activated").
|
Cols("is_activated").
|
||||||
Update(email); err != nil {
|
Update(email); err != nil {
|
||||||
return err
|
return err
|
||||||
|
|
|
@ -31,12 +31,12 @@ func TestGetUserOpenIDs(t *testing.T) {
|
||||||
func TestGetUserByOpenID(t *testing.T) {
|
func TestGetUserByOpenID(t *testing.T) {
|
||||||
assert.NoError(t, PrepareTestDatabase())
|
assert.NoError(t, PrepareTestDatabase())
|
||||||
|
|
||||||
user, err := GetUserByOpenID("https://unknown")
|
_, err := GetUserByOpenID("https://unknown")
|
||||||
if assert.Error(t, err) {
|
if assert.Error(t, err) {
|
||||||
assert.True(t, IsErrUserNotExist(err))
|
assert.True(t, IsErrUserNotExist(err))
|
||||||
}
|
}
|
||||||
|
|
||||||
user, err = GetUserByOpenID("https://user1.domain1.tld")
|
user, err := GetUserByOpenID("https://user1.domain1.tld")
|
||||||
if assert.NoError(t, err) {
|
if assert.NoError(t, err) {
|
||||||
assert.Equal(t, user.ID, int64(1))
|
assert.Equal(t, user.ID, int64(1))
|
||||||
}
|
}
|
||||||
|
|
|
@@ -700,7 +700,10 @@ func prepareWebhook(e Engine, w *Webhook, repo *Repository, event HookEventType,
 			log.Error("prepareWebhooks.JSONPayload: %v", err)
 		}
 		sig := hmac.New(sha256.New, []byte(w.Secret))
-		sig.Write(data)
+		_, err = sig.Write(data)
+		if err != nil {
+			log.Error("prepareWebhooks.sigWrite: %v", err)
+		}
 		signature = hex.EncodeToString(sig.Sum(nil))
 	}

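The webhook signature is an HMAC-SHA256 of the JSON payload keyed with the hook secret; the commit only stops ignoring the error from sig.Write. A standalone sketch of producing such a hex signature follows; note that hash.Hash.Write is documented never to return an error, which is why logging it is considered sufficient here. The secret and payload below are made up.

// Sketch of an HMAC-SHA256 hex signature, not Gitea's webhook code.
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// sign returns hex(HMAC-SHA256(payload)) using secret as the key.
func sign(secret string, payload []byte) (string, error) {
	mac := hmac.New(sha256.New, []byte(secret))
	if _, err := mac.Write(payload); err != nil {
		// hash.Hash.Write never returns an error, but checking keeps
		// errcheck and future readers honest.
		return "", err
	}
	return hex.EncodeToString(mac.Sum(nil)), nil
}

func main() {
	sig, _ := sign("webhook-secret", []byte(`{"action":"opened"}`))
	fmt.Println(sig)
}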
@ -930,8 +933,7 @@ func InitDeliverHooks() {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
conn.SetDeadline(time.Now().Add(timeout))
|
return conn, conn.SetDeadline(time.Now().Add(timeout))
|
||||||
return conn, nil
|
|
||||||
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
|
|
@ -490,7 +490,7 @@ func getDiscordReleasePayload(p *api.ReleasePayload, meta *DiscordMeta) (*Discor
|
||||||
Embeds: []DiscordEmbed{
|
Embeds: []DiscordEmbed{
|
||||||
{
|
{
|
||||||
Title: title,
|
Title: title,
|
||||||
Description: fmt.Sprintf("%s", p.Release.Note),
|
Description: p.Release.Note,
|
||||||
URL: url,
|
URL: url,
|
||||||
Color: color,
|
Color: color,
|
||||||
Author: DiscordEmbedAuthor{
|
Author: DiscordEmbedAuthor{
|
||||||
|
|
|
@ -115,7 +115,11 @@ func (repo *Repository) updateWikiPage(doer *User, oldWikiName, newWikiName, con
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer RemoveTemporaryPath(basePath)
|
defer func() {
|
||||||
|
if err := RemoveTemporaryPath(basePath); err != nil {
|
||||||
|
log.Error("Merge: RemoveTemporaryPath: %s", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
cloneOpts := git.CloneRepoOptions{
|
cloneOpts := git.CloneRepoOptions{
|
||||||
Bare: true,
|
Bare: true,
|
||||||
|
@ -246,7 +250,11 @@ func (repo *Repository) DeleteWikiPage(doer *User, wikiName string) (err error)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer RemoveTemporaryPath(basePath)
|
defer func() {
|
||||||
|
if err := RemoveTemporaryPath(basePath); err != nil {
|
||||||
|
log.Error("Merge: RemoveTemporaryPath: %s", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
if err := git.Clone(repo.WikiPath(), basePath, git.CloneRepoOptions{
|
if err := git.Clone(repo.WikiPath(), basePath, git.CloneRepoOptions{
|
||||||
Bare: true,
|
Bare: true,
|
||||||
|
|
|
@ -214,10 +214,8 @@ func SignedInUser(ctx *macaron.Context, sess session.Store) (*models.User, bool)
|
||||||
if err = models.UpdateAccessToken(token); err != nil {
|
if err = models.UpdateAccessToken(token); err != nil {
|
||||||
log.Error("UpdateAccessToken: %v", err)
|
log.Error("UpdateAccessToken: %v", err)
|
||||||
}
|
}
|
||||||
} else {
|
} else if !models.IsErrAccessTokenNotExist(err) && !models.IsErrAccessTokenEmpty(err) {
|
||||||
if !models.IsErrAccessTokenNotExist(err) && !models.IsErrAccessTokenEmpty(err) {
|
log.Error("GetAccessTokenBySha: %v", err)
|
||||||
log.Error("GetAccessTokenBySha: %v", err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if u == nil {
|
if u == nil {
|
||||||
|
@ -301,12 +299,6 @@ func GetInclude(field reflect.StructField) string {
|
||||||
return getRuleBody(field, "Include(")
|
return getRuleBody(field, "Include(")
|
||||||
}
|
}
|
||||||
|
|
||||||
// FIXME: struct contains a struct
|
|
||||||
func validateStruct(obj interface{}) binding.Errors {
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func validate(errs binding.Errors, data map[string]interface{}, f Form, l macaron.Locale) binding.Errors {
|
func validate(errs binding.Errors, data map[string]interface{}, f Form, l macaron.Locale) binding.Errors {
|
||||||
if errs.Len() == 0 {
|
if errs.Len() == 0 {
|
||||||
return errs
|
return errs
|
||||||
|
|
|
@@ -220,8 +220,7 @@ func GetDefaultProfileURL(provider string) string {

 // GetDefaultEmailURL return the default email url for the given provider
 func GetDefaultEmailURL(provider string) string {
-	switch provider {
-	case "github":
+	if provider == "github" {
 		return github.EmailURL
 	}
 	return ""
@ -39,7 +39,7 @@ func TestTimedDiscoveryCache(t *testing.T) {
|
||||||
t.Errorf("Expected nil, got %v", di)
|
t.Errorf("Expected nil, got %v", di)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Sleep one second and try retrive again
|
// Sleep one second and try retrieve again
|
||||||
time.Sleep(1 * time.Second)
|
time.Sleep(1 * time.Second)
|
||||||
|
|
||||||
if di := dc.Get("foo"); di != nil {
|
if di := dc.Get("foo"); di != nil {
|
||||||
|
|
|
@ -253,7 +253,7 @@ func (f UpdateThemeForm) IsThemeExists() bool {
|
||||||
var exists bool
|
var exists bool
|
||||||
|
|
||||||
for _, v := range setting.UI.Themes {
|
for _, v := range setting.UI.Themes {
|
||||||
if strings.ToLower(v) == strings.ToLower(f.Theme) {
|
if strings.EqualFold(v, f.Theme) {
|
||||||
exists = true
|
exists = true
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
|
|
@ -44,21 +44,21 @@ var UTF8BOM = []byte{'\xef', '\xbb', '\xbf'}
|
||||||
// EncodeMD5 encodes string to md5 hex value.
|
// EncodeMD5 encodes string to md5 hex value.
|
||||||
func EncodeMD5(str string) string {
|
func EncodeMD5(str string) string {
|
||||||
m := md5.New()
|
m := md5.New()
|
||||||
m.Write([]byte(str))
|
_, _ = m.Write([]byte(str))
|
||||||
return hex.EncodeToString(m.Sum(nil))
|
return hex.EncodeToString(m.Sum(nil))
|
||||||
}
|
}
|
||||||
|
|
||||||
// EncodeSha1 string to sha1 hex value.
|
// EncodeSha1 string to sha1 hex value.
|
||||||
func EncodeSha1(str string) string {
|
func EncodeSha1(str string) string {
|
||||||
h := sha1.New()
|
h := sha1.New()
|
||||||
h.Write([]byte(str))
|
_, _ = h.Write([]byte(str))
|
||||||
return hex.EncodeToString(h.Sum(nil))
|
return hex.EncodeToString(h.Sum(nil))
|
||||||
}
|
}
|
||||||
|
|
||||||
// EncodeSha256 string to sha1 hex value.
|
// EncodeSha256 string to sha1 hex value.
|
||||||
func EncodeSha256(str string) string {
|
func EncodeSha256(str string) string {
|
||||||
h := sha256.New()
|
h := sha256.New()
|
||||||
h.Write([]byte(str))
|
_, _ = h.Write([]byte(str))
|
||||||
return hex.EncodeToString(h.Sum(nil))
|
return hex.EncodeToString(h.Sum(nil))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -193,7 +193,7 @@ func CreateTimeLimitCode(data string, minutes int, startInf interface{}) string
|
||||||
|
|
||||||
// create sha1 encode string
|
// create sha1 encode string
|
||||||
sh := sha1.New()
|
sh := sha1.New()
|
||||||
sh.Write([]byte(data + setting.SecretKey + startStr + endStr + com.ToStr(minutes)))
|
_, _ = sh.Write([]byte(data + setting.SecretKey + startStr + endStr + com.ToStr(minutes)))
|
||||||
encoded := hex.EncodeToString(sh.Sum(nil))
|
encoded := hex.EncodeToString(sh.Sum(nil))
|
||||||
|
|
||||||
code := fmt.Sprintf("%s%06d%s", startStr, minutes, encoded)
|
code := fmt.Sprintf("%s%06d%s", startStr, minutes, encoded)
|
||||||
|
@ -425,16 +425,6 @@ const (
|
||||||
EByte = PByte * 1024
|
EByte = PByte * 1024
|
||||||
)
|
)
|
||||||
|
|
||||||
var bytesSizeTable = map[string]uint64{
|
|
||||||
"b": Byte,
|
|
||||||
"kb": KByte,
|
|
||||||
"mb": MByte,
|
|
||||||
"gb": GByte,
|
|
||||||
"tb": TByte,
|
|
||||||
"pb": PByte,
|
|
||||||
"eb": EByte,
|
|
||||||
}
|
|
||||||
|
|
||||||
func logn(n, b float64) float64 {
|
func logn(n, b float64) float64 {
|
||||||
return math.Log(n) / math.Log(b)
|
return math.Log(n) / math.Log(b)
|
||||||
}
|
}
|
||||||
|
@@ -582,27 +572,27 @@ func IsTextFile(data []byte) bool {
 	if len(data) == 0 {
 		return true
 	}
-	return strings.Index(http.DetectContentType(data), "text/") != -1
+	return strings.Contains(http.DetectContentType(data), "text/")
 }

 // IsImageFile detects if data is an image format
 func IsImageFile(data []byte) bool {
-	return strings.Index(http.DetectContentType(data), "image/") != -1
+	return strings.Contains(http.DetectContentType(data), "image/")
 }

 // IsPDFFile detects if data is a pdf format
 func IsPDFFile(data []byte) bool {
-	return strings.Index(http.DetectContentType(data), "application/pdf") != -1
+	return strings.Contains(http.DetectContentType(data), "application/pdf")
 }

 // IsVideoFile detects if data is an video format
 func IsVideoFile(data []byte) bool {
-	return strings.Index(http.DetectContentType(data), "video/") != -1
+	return strings.Contains(http.DetectContentType(data), "video/")
 }

 // IsAudioFile detects if data is an video format
 func IsAudioFile(data []byte) bool {
-	return strings.Index(http.DetectContentType(data), "audio/") != -1
+	return strings.Contains(http.DetectContentType(data), "audio/")
 }

 // EntryIcon returns the octicon class for displaying files/directories
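These helpers sniff a byte slice's MIME type and tested strings.Index(...) != -1, which the simplification linters rewrite as strings.Contains. The same detection in a self-contained form, as a sketch rather than the project's code:

// Sketch of content-type sniffing with strings.Contains.
package main

import (
	"fmt"
	"net/http"
	"strings"
)

// isText reports whether data looks like text according to Go's content sniffer.
func isText(data []byte) bool {
	// DetectContentType always returns a valid MIME type,
	// e.g. "text/plain; charset=utf-8" or "application/octet-stream".
	return strings.Contains(http.DetectContentType(data), "text/")
}

func main() {
	fmt.Println(isText([]byte("hello, world")))   // true
	fmt.Println(isText([]byte{0x00, 0x01, 0x02})) // false
}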
@ -287,20 +287,19 @@ func TestHtmlTimeSince(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFileSize(t *testing.T) {
|
func TestFileSize(t *testing.T) {
|
||||||
var size int64
|
var size int64 = 512
|
||||||
size = 512
|
|
||||||
assert.Equal(t, "512B", FileSize(size))
|
assert.Equal(t, "512B", FileSize(size))
|
||||||
size = size * 1024
|
size *= 1024
|
||||||
assert.Equal(t, "512KB", FileSize(size))
|
assert.Equal(t, "512KB", FileSize(size))
|
||||||
size = size * 1024
|
size *= 1024
|
||||||
assert.Equal(t, "512MB", FileSize(size))
|
assert.Equal(t, "512MB", FileSize(size))
|
||||||
size = size * 1024
|
size *= 1024
|
||||||
assert.Equal(t, "512GB", FileSize(size))
|
assert.Equal(t, "512GB", FileSize(size))
|
||||||
size = size * 1024
|
size *= 1024
|
||||||
assert.Equal(t, "512TB", FileSize(size))
|
assert.Equal(t, "512TB", FileSize(size))
|
||||||
size = size * 1024
|
size *= 1024
|
||||||
assert.Equal(t, "512PB", FileSize(size))
|
assert.Equal(t, "512PB", FileSize(size))
|
||||||
size = size * 4
|
size *= 4
|
||||||
assert.Equal(t, "2.0EB", FileSize(size))
|
assert.Equal(t, "2.0EB", FileSize(size))
|
||||||
}
|
}
|
||||||
|
|
||||||
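Editor's note (not part of the diff): the IsTextFile/IsImageFile/IsPDFFile changes above are the gosimple/gocritic rewrite of `strings.Index(s, sub) != -1` into the equivalent `strings.Contains(s, sub)`. A minimal, self-contained sketch of the same idea; the helper name isTextContent is made up for illustration only.

    package main

    import (
    	"fmt"
    	"net/http"
    	"strings"
    )

    // isTextContent reports whether data looks like text, using the
    // strings.Contains form that gosimple prefers over strings.Index != -1.
    func isTextContent(data []byte) bool {
    	contentType := http.DetectContentType(data) // e.g. "text/plain; charset=utf-8"
    	return strings.Contains(contentType, "text/")
    }

    func main() {
    	fmt.Println(isTextContent([]byte("hello, world"))) // true
    }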
modules/cache/cache.go (12 changed lines, vendored)
@@ -43,7 +43,10 @@ func GetInt(key string, getFunc func() (int, error)) (int, error) {
 		if value, err = getFunc(); err != nil {
 			return value, err
 		}
-		conn.Put(key, value, int64(setting.CacheService.TTL.Seconds()))
+		err = conn.Put(key, value, int64(setting.CacheService.TTL.Seconds()))
+		if err != nil {
+			return 0, err
+		}
 	}
 	switch value := conn.Get(key).(type) {
 	case int:
@@ -72,7 +75,10 @@ func GetInt64(key string, getFunc func() (int64, error)) (int64, error) {
 		if value, err = getFunc(); err != nil {
 			return value, err
 		}
-		conn.Put(key, value, int64(setting.CacheService.TTL.Seconds()))
+		err = conn.Put(key, value, int64(setting.CacheService.TTL.Seconds()))
+		if err != nil {
+			return 0, err
+		}
 	}
 	switch value := conn.Get(key).(type) {
 	case int64:
@@ -93,5 +99,5 @@ func Remove(key string) {
 	if conn == nil {
 		return
 	}
-	conn.Delete(key)
+	_ = conn.Delete(key)
 }
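Editor's note (not part of the diff): the cache changes follow the errcheck rule enabled in this PR, which requires each error to be either propagated or discarded explicitly with `_ =`. A small sketch of the two options; the kvStore interface and the helper names are hypothetical, for illustration only.

    package main

    import "fmt"

    // kvStore is a hypothetical cache interface used only for this sketch.
    type kvStore interface {
    	Put(key string, value interface{}, ttl int64) error
    	Delete(key string) error
    }

    // setInt propagates the Put error to the caller, as GetInt/GetInt64 now do.
    func setInt(c kvStore, key string, value int) error {
    	if err := c.Put(key, value, 60); err != nil {
    		return fmt.Errorf("Put: %v", err)
    	}
    	return nil
    }

    // remove deliberately ignores the Delete error, making the choice visible to errcheck.
    func remove(c kvStore, key string) {
    	_ = c.Delete(key)
    }

    func main() {}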
@@ -130,7 +130,6 @@ func (ctx *Context) RedirectToFirst(location ...string) {
 	}

 	ctx.Redirect(setting.AppSubURL + "/")
-	return
 }

 // HTML calls Context.HTML and converts template name to string.
@@ -266,7 +265,7 @@ func Contexter() macaron.Handler {
 			}
 			c.Header().Set("Content-Type", "text/html")
 			c.WriteHeader(http.StatusOK)
-			c.Write([]byte(com.Expand(`<!doctype html>
+			_, _ = c.Write([]byte(com.Expand(`<!doctype html>
 <html>
 <head>
 <meta name="go-import" content="{GoGetImport} git {CloneLink}">

@@ -39,7 +39,7 @@ func (p *Pagination) AddParam(ctx *Context, paramKey string, ctxKey string) {

 // GetParams returns the configured URL params
 func (p *Pagination) GetParams() template.URL {
-	return template.URL(strings.Join(p.urlParams[:], "&"))
+	return template.URL(strings.Join(p.urlParams, "&"))
 }

 // SetDefaultParams sets common pagination params that are often used

@@ -455,15 +455,13 @@ func RepoAssignment() macaron.Handler {
 			ctx.Repo.PullRequest.BaseRepo = repo.BaseRepo
 			ctx.Repo.PullRequest.Allowed = true
 			ctx.Repo.PullRequest.HeadInfo = ctx.Repo.Owner.Name + ":" + ctx.Repo.BranchName
-		} else {
+		} else if repo.AllowsPulls() {
 			// Or, this is repository accepts pull requests between branches.
-			if repo.AllowsPulls() {
-				ctx.Data["BaseRepo"] = repo
-				ctx.Repo.PullRequest.BaseRepo = repo
-				ctx.Repo.PullRequest.Allowed = true
-				ctx.Repo.PullRequest.SameRepo = true
-				ctx.Repo.PullRequest.HeadInfo = ctx.Repo.BranchName
-			}
+			ctx.Data["BaseRepo"] = repo
+			ctx.Repo.PullRequest.BaseRepo = repo
+			ctx.Repo.PullRequest.Allowed = true
+			ctx.Repo.PullRequest.SameRepo = true
+			ctx.Repo.PullRequest.HeadInfo = ctx.Repo.BranchName
 		}
 	}
 	ctx.Data["PullRequestCtx"] = ctx.Repo.PullRequest

@@ -50,12 +50,12 @@ func (b *Blob) GetBlobContentBase64() (string, error) {

 	go func() {
 		_, err := io.Copy(encoder, dataRc)
-		encoder.Close()
+		_ = encoder.Close()

 		if err != nil {
-			pw.CloseWithError(err)
+			_ = pw.CloseWithError(err)
 		} else {
-			pw.Close()
+			_ = pw.Close()
 		}
 	}()

@@ -133,7 +133,7 @@ func (c *Commit) ParentCount() int {

 func isImageFile(data []byte) (string, bool) {
 	contentType := http.DetectContentType(data)
-	if strings.Index(contentType, "image/") != -1 {
+	if strings.Contains(contentType, "image/") {
 		return contentType, true
 	}
 	return contentType, false
@@ -206,8 +206,7 @@ func CommitChanges(repoPath string, opts CommitChangesOptions) error {
 }

 func commitsCount(repoPath, revision, relpath string) (int64, error) {
-	var cmd *Command
-	cmd = NewCommand("rev-list", "--count")
+	cmd := NewCommand("rev-list", "--count")
 	cmd.AddArguments(revision)
 	if len(relpath) > 0 {
 		cmd.AddArguments("--", relpath)
@@ -263,7 +262,7 @@ type SearchCommitsOptions struct {
 	All bool
 }

-// NewSearchCommitsOptions contruct a SearchCommitsOption from a space-delimited search string
+// NewSearchCommitsOptions construct a SearchCommitsOption from a space-delimited search string
 func NewSearchCommitsOptions(searchString string, forAllRefs bool) SearchCommitsOptions {
 	var keywords, authors, committers []string
 	var after, before string

@@ -87,16 +87,6 @@ func getCommitTree(c *object.Commit, treePath string) (*object.Tree, error) {
 	return tree, nil
 }

-func getFullPath(treePath, path string) string {
-	if treePath != "" {
-		if path != "" {
-			return treePath + "/" + path
-		}
-		return treePath
-	}
-	return path
-}
-
 func getFileHashes(c *object.Commit, treePath string, paths []string) (map[string]plumbing.Hash, error) {
 	tree, err := getCommitTree(c, treePath)
 	if err == object.ErrDirectoryNotFound {
@@ -58,21 +58,21 @@ func (repo *Repository) parsePrettyFormatLogToList(logs []byte) (*list.List, err
 // IsRepoURLAccessible checks if given repository URL is accessible.
 func IsRepoURLAccessible(url string) bool {
 	_, err := NewCommand("ls-remote", "-q", "-h", url, "HEAD").Run()
-	if err != nil {
-		return false
-	}
-	return true
+	return err == nil
 }

 // InitRepository initializes a new Git repository.
 func InitRepository(repoPath string, bare bool) error {
-	os.MkdirAll(repoPath, os.ModePerm)
+	err := os.MkdirAll(repoPath, os.ModePerm)
+	if err != nil {
+		return err
+	}

 	cmd := NewCommand("init")
 	if bare {
 		cmd.AddArguments("--bare")
 	}
-	_, err := cmd.RunInDir(repoPath)
+	_, err = cmd.RunInDir(repoPath)
 	return err
 }

@@ -29,10 +29,7 @@ func IsBranchExist(repoPath, name string) bool {
 // IsBranchExist returns true if given branch exists in current repository.
 func (repo *Repository) IsBranchExist(name string) bool {
 	_, err := repo.gogitRepo.Reference(plumbing.ReferenceName(BranchPrefix+name), true)
-	if err != nil {
-		return false
-	}
-	return true
+	return err == nil
 }

 // Branch represents a Git branch.
@@ -77,7 +74,7 @@ func (repo *Repository) GetBranches() ([]string, error) {
 		return nil, err
 	}

-	branches.ForEach(func(branch *plumbing.Reference) error {
+	_ = branches.ForEach(func(branch *plumbing.Reference) error {
 		branchNames = append(branchNames, strings.TrimPrefix(branch.Name().String(), BranchPrefix))
 		return nil
 	})

@@ -31,10 +31,7 @@ func (repo *Repository) GetRefCommitID(name string) (string, error) {
 func (repo *Repository) IsCommitExist(name string) bool {
 	hash := plumbing.NewHash(name)
 	_, err := repo.gogitRepo.CommitObject(hash)
-	if err != nil {
-		return false
-	}
-	return true
+	return err == nil
 }

 // GetBranchCommitID returns last commit ID string of given branch.
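Editor's note (not part of the diff): IsRepoURLAccessible, IsBranchExist and IsCommitExist above all apply the same gosimple rewrite, collapsing an `if err != nil { return false } return true` tail into `return err == nil`. A tiny self-contained illustration; the lookup function is made up for the example.

    package main

    import (
    	"errors"
    	"fmt"
    )

    var errNotFound = errors.New("not found")

    // lookup is a stand-in for a reference or commit lookup that can fail.
    func lookup(name string) error {
    	if name == "" {
    		return errNotFound
    	}
    	return nil
    }

    // exists reports success as a boolean; gosimple prefers this single-line form
    // over if err != nil { return false } / return true.
    func exists(name string) bool {
    	err := lookup(name)
    	return err == nil
    }

    func main() {
    	fmt.Println(exists("master"), exists("")) // true false
    }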
@@ -13,6 +13,8 @@ import (
 	"strconv"
 	"strings"
 	"time"
+
+	logger "code.gitea.io/gitea/modules/log"
 )

 // CompareInfo represents needed information for comparing references.
@@ -55,7 +57,11 @@ func (repo *Repository) GetCompareInfo(basePath, baseBranch, headBranch string)
 		if err = repo.AddRemote(tmpRemote, basePath, true); err != nil {
 			return nil, fmt.Errorf("AddRemote: %v", err)
 		}
-		defer repo.RemoveRemote(tmpRemote)
+		defer func() {
+			if err := repo.RemoveRemote(tmpRemote); err != nil {
+				logger.Error("GetPullRequestInfo: RemoveRemote: %v", err)
+			}
+		}()
 	}

 	compareInfo := new(CompareInfo)
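Editor's note (not part of the diff): the GetCompareInfo hunk replaces a bare `defer repo.RemoveRemote(tmpRemote)` with a deferred closure so the cleanup error is at least logged, which satisfies errcheck without changing the function's return value. A minimal sketch of that shape under those assumptions; removeRemote and the remote name are placeholders.

    package main

    import (
    	"errors"
    	"log"
    )

    // removeRemote stands in for a cleanup call whose error used to be dropped.
    func removeRemote(name string) error {
    	if name == "" {
    		return errors.New("no remote name")
    	}
    	return nil
    }

    func compare(tmpRemote string) error {
    	// ... set up the temporary remote here ...
    	defer func() {
    		// Wrapping the call in a closure lets us inspect the error;
    		// a bare `defer removeRemote(tmpRemote)` would silently discard it.
    		if err := removeRemote(tmpRemote); err != nil {
    			log.Printf("RemoveRemote: %v", err)
    		}
    	}()
    	// ... do the actual comparison work ...
    	return nil
    }

    func main() {
    	_ = compare("base-abc123")
    }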
@@ -24,10 +24,7 @@ func IsTagExist(repoPath, name string) bool {
 // IsTagExist returns true if given tag exists in the repository.
 func (repo *Repository) IsTagExist(name string) bool {
 	_, err := repo.gogitRepo.Reference(plumbing.ReferenceName(TagPrefix+name), true)
-	if err != nil {
-		return false
-	}
-	return true
+	return err == nil
 }

 // CreateTag create one tag in the repository
@@ -221,7 +218,7 @@ func (repo *Repository) GetTags() ([]string, error) {
 		return nil, err
 	}

-	tags.ForEach(func(tag *plumbing.Reference) error {
+	_ = tags.ForEach(func(tag *plumbing.Reference) error {
 		tagNames = append(tagNames, strings.TrimPrefix(tag.Name().String(), TagPrefix))
 		return nil
 	})

@@ -7,7 +7,6 @@ package git
 import (
 	"fmt"
 	"os"
-	"path/filepath"
 	"strings"
 	"sync"
 )
@@ -75,13 +74,6 @@ func concatenateError(err error, stderr string) error {
 	return fmt.Errorf("%v - %s", err, stderr)
 }

-// If the object is stored in its own file (i.e not in a pack file),
-// this function returns the full path to the object file.
-// It does not test if the file exists.
-func filepathFromSHA1(rootdir, sha1 string) string {
-	return filepath.Join(rootdir, "objects", sha1[:2], sha1[2:])
-}
-
 // RefEndName return the end name of a ref name
 func RefEndName(refStr string) string {
 	if strings.HasPrefix(refStr, BranchPrefix) {

@@ -74,7 +74,6 @@ func (wp *WriterPool) Put(w *gzip.Writer) {
 }

 var writerPool WriterPool
-var regex regexp.Regexp

 // Options represents the configuration for the gzip middleware
 type Options struct {
@@ -116,7 +115,7 @@ func Middleware(options ...Options) macaron.Handler {
 		if rangeHdr := ctx.Req.Header.Get(rangeHeader); rangeHdr != "" {

 			match := regex.FindStringSubmatch(rangeHdr)
-			if match != nil && len(match) > 1 {
+			if len(match) > 1 {
 				return
 			}
 		}
@@ -270,9 +269,8 @@ func (proxy *ProxyResponseWriter) Close() error {

 	if proxy.writer == nil {
 		err := proxy.startPlain()
-
 		if err != nil {
-			err = fmt.Errorf("GzipMiddleware: write to regular responseWriter at close gets error: %q", err.Error())
+			return fmt.Errorf("GzipMiddleware: write to regular responseWriter at close gets error: %q", err.Error())
 		}
 	}

@@ -263,7 +263,7 @@ func (r *Request) getResponse() (*http.Response, error) {
 	}

 	if r.req.Method == "GET" && len(paramBody) > 0 {
-		if strings.Index(r.url, "?") != -1 {
+		if strings.Contains(r.url, "?") {
 			r.url += "&" + paramBody
 		} else {
 			r.url = r.url + "?" + paramBody
@@ -290,10 +290,13 @@ func (r *Request) getResponse() (*http.Response, error) {
 			}
 		}
 		for k, v := range r.params {
-			bodyWriter.WriteField(k, v)
+			err := bodyWriter.WriteField(k, v)
+			if err != nil {
+				log.Fatal(err)
+			}
 		}
-		bodyWriter.Close()
-		pw.Close()
+		_ = bodyWriter.Close()
+		_ = pw.Close()
 	}()
 	r.Header("Content-Type", bodyWriter.FormDataContentType())
 	r.req.Body = ioutil.NopCloser(pr)
@@ -323,18 +326,15 @@ func (r *Request) getResponse() (*http.Response, error) {
 			Proxy: proxy,
 			Dial: TimeoutDialer(r.setting.ConnectTimeout, r.setting.ReadWriteTimeout),
 		}
-	} else {
-		// if r.transport is *http.Transport then set the settings.
-		if t, ok := trans.(*http.Transport); ok {
-			if t.TLSClientConfig == nil {
-				t.TLSClientConfig = r.setting.TLSClientConfig
-			}
-			if t.Proxy == nil {
-				t.Proxy = r.setting.Proxy
-			}
-			if t.Dial == nil {
-				t.Dial = TimeoutDialer(r.setting.ConnectTimeout, r.setting.ReadWriteTimeout)
-			}
-		}
+	} else if t, ok := trans.(*http.Transport); ok {
+		if t.TLSClientConfig == nil {
+			t.TLSClientConfig = r.setting.TLSClientConfig
+		}
+		if t.Proxy == nil {
+			t.Proxy = r.setting.Proxy
+		}
+		if t.Dial == nil {
+			t.Dial = TimeoutDialer(r.setting.ConnectTimeout, r.setting.ReadWriteTimeout)
+		}
 		}
 	}

@@ -461,7 +461,6 @@ func TimeoutDialer(cTimeout time.Duration, rwTimeout time.Duration) func(net, ad
 	if err != nil {
 		return nil, err
 	}
-	conn.SetDeadline(time.Now().Add(rwTimeout))
-	return conn, nil
+	return conn, conn.SetDeadline(time.Now().Add(rwTimeout))
 }
 }
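Editor's note (not part of the diff): the transport branch above, like the RepoAssignment hunk earlier, is the golint/gocritic-style cleanup where an `else` block whose only statement is an `if` becomes `else if`, dropping one level of nesting without changing behaviour. Schematically, with made-up condition names:

    package main

    import "fmt"

    func classify(a, b bool) string {
    	// Before: else { if b { ... } } is flagged; folding the inner if
    	// into `else if` keeps the same logic with less nesting.
    	if a {
    		return "a"
    	} else if b {
    		return "b"
    	}
    	return "neither"
    }

    func main() {
    	fmt.Println(classify(false, true)) // b
    }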
@@ -5,7 +5,6 @@
 package indexer

 import (
-	"fmt"
 	"os"
 	"strconv"

@@ -24,15 +23,6 @@ func indexerID(id int64) string {
 	return strconv.FormatInt(id, 36)
 }

-// idOfIndexerID the integer id associated with an indexer id
-func idOfIndexerID(indexerID string) (int64, error) {
-	id, err := strconv.ParseInt(indexerID, 36, 64)
-	if err != nil {
-		return 0, fmt.Errorf("Unexpected indexer ID %s: %v", indexerID, err)
-	}
-	return id, nil
-}
-
 // numericEqualityQuery a numeric equality query for the given value and field
 func numericEqualityQuery(value int64, field string) *query.NumericRangeQuery {
 	f := float64(value)
@@ -42,13 +32,6 @@ func numericEqualityQuery(value int64, field string) *query.NumericRangeQuery {
 	return q
 }

-func newMatchPhraseQuery(matchPhrase, field, analyzer string) *query.MatchPhraseQuery {
-	q := bleve.NewMatchPhraseQuery(matchPhrase)
-	q.FieldVal = field
-	q.Analyzer = analyzer
-	return q
-}
-
 const unicodeNormalizeName = "unicodeNormalize"

 func addUnicodeNormalizeTokenFilter(m *mapping.IndexMappingImpl) error {

@@ -101,7 +101,12 @@ func InitIssueIndexer(syncReindex bool) error {
 		return fmt.Errorf("Unsupported indexer queue type: %v", setting.Indexer.IssueQueueType)
 	}

-	go issueIndexerQueue.Run()
+	go func() {
+		err = issueIndexerQueue.Run()
+		if err != nil {
+			log.Error("issueIndexerQueue.Run: %v", err)
+		}
+	}()

 	if populate {
 		if syncReindex {
@@ -161,7 +166,7 @@ func UpdateIssueIndexer(issue *models.Issue) {
 			comments = append(comments, comment.Content)
 		}
 	}
-	issueIndexerQueue.Push(&IndexerData{
+	_ = issueIndexerQueue.Push(&IndexerData{
 		ID: issue.ID,
 		RepoID: issue.RepoID,
 		Title: issue.Title,
@@ -179,11 +184,11 @@ func DeleteRepoIssueIndexer(repo *models.Repository) {
 		return
 	}

-	if len(ids) <= 0 {
+	if len(ids) == 0 {
 		return
 	}

-	issueIndexerQueue.Push(&IndexerData{
+	_ = issueIndexerQueue.Push(&IndexerData{
 		IDs: ids,
 		IsDelete: true,
 	})

@@ -34,20 +34,20 @@ func (c *ChannelQueue) Run() error {
 		select {
 		case data := <-c.queue:
 			if data.IsDelete {
-				c.indexer.Delete(data.IDs...)
+				_ = c.indexer.Delete(data.IDs...)
 				continue
 			}

 			datas = append(datas, data)
 			if len(datas) >= c.batchNumber {
-				c.indexer.Index(datas)
+				_ = c.indexer.Index(datas)
 				// TODO: save the point
 				datas = make([]*IndexerData, 0, c.batchNumber)
 			}
 		case <-time.After(time.Millisecond * 100):
 			i++
 			if i >= 3 && len(datas) > 0 {
-				c.indexer.Index(datas)
+				_ = c.indexer.Index(datas)
 				// TODO: save the point
 				datas = make([]*IndexerData, 0, c.batchNumber)
 			}

@@ -44,7 +44,7 @@ func (l *LevelQueue) Run() error {
 	for {
 		i++
 		if len(datas) > l.batchNumber || (len(datas) > 0 && i > 3) {
-			l.indexer.Index(datas)
+			_ = l.indexer.Index(datas)
 			datas = make([]*IndexerData, 0, l.batchNumber)
 			i = 0
 			continue
@@ -59,7 +59,7 @@ func (l *LevelQueue) Run() error {
 			continue
 		}

-		if len(bs) <= 0 {
+		if len(bs) == 0 {
 			time.Sleep(time.Millisecond * 100)
 			continue
 		}

@@ -96,12 +96,12 @@ func (r *RedisQueue) Run() error {

 		i++
 		if len(datas) > r.batchNumber || (len(datas) > 0 && i > 3) {
-			r.indexer.Index(datas)
+			_ = r.indexer.Index(datas)
 			datas = make([]*IndexerData, 0, r.batchNumber)
 			i = 0
 		}

-		if len(bs) <= 0 {
+		if len(bs) == 0 {
 			time.Sleep(time.Millisecond * 100)
 			continue
 		}
@@ -17,7 +17,7 @@ import (
 )

 //checkIsValidRequest check if it a valid request in case of bad request it write the response to ctx.
-func checkIsValidRequest(ctx *context.Context, post bool) bool {
+func checkIsValidRequest(ctx *context.Context) bool {
 	if !setting.LFS.StartServer {
 		writeStatus(ctx, 404)
 		return false
@@ -35,13 +35,6 @@ func checkIsValidRequest(ctx *context.Context, post bool) bool {
 		}
 		ctx.User = user
 	}
-	if post {
-		mediaParts := strings.Split(ctx.Req.Header.Get("Content-Type"), ";")
-		if mediaParts[0] != metaMediaType {
-			writeStatus(ctx, 400)
-			return false
-		}
-	}
 	return true
 }

@@ -71,7 +64,7 @@ func handleLockListOut(ctx *context.Context, repo *models.Repository, lock *mode

 // GetListLockHandler list locks
 func GetListLockHandler(ctx *context.Context) {
-	if !checkIsValidRequest(ctx, false) {
+	if !checkIsValidRequest(ctx) {
 		return
 	}
 	ctx.Resp.Header().Set("Content-Type", metaMediaType)
@@ -135,7 +128,7 @@ func GetListLockHandler(ctx *context.Context) {

 // PostLockHandler create lock
 func PostLockHandler(ctx *context.Context) {
-	if !checkIsValidRequest(ctx, false) {
+	if !checkIsValidRequest(ctx) {
 		return
 	}
 	ctx.Resp.Header().Set("Content-Type", metaMediaType)
@@ -198,7 +191,7 @@ func PostLockHandler(ctx *context.Context) {

 // VerifyLockHandler list locks for verification
 func VerifyLockHandler(ctx *context.Context) {
-	if !checkIsValidRequest(ctx, false) {
+	if !checkIsValidRequest(ctx) {
 		return
 	}
 	ctx.Resp.Header().Set("Content-Type", metaMediaType)
@@ -249,7 +242,7 @@ func VerifyLockHandler(ctx *context.Context) {

 // UnLockHandler delete locks
 func UnLockHandler(ctx *context.Context) {
-	if !checkIsValidRequest(ctx, false) {
+	if !checkIsValidRequest(ctx) {
 		return
 	}
 	ctx.Resp.Header().Set("Content-Type", metaMediaType)

@@ -152,7 +152,7 @@ func getContentHandler(ctx *context.Context) {
 	if rangeHdr := ctx.Req.Header.Get("Range"); rangeHdr != "" {
 		regex := regexp.MustCompile(`bytes=(\d+)\-.*`)
 		match := regex.FindStringSubmatch(rangeHdr)
-		if match != nil && len(match) > 1 {
+		if len(match) > 1 {
 			statusCode = 206
 			fromByte, _ = strconv.ParseInt(match[1], 10, 32)
 			ctx.Resp.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", fromByte, meta.Size-1, meta.Size-fromByte))
@@ -178,8 +178,8 @@ func getContentHandler(ctx *context.Context) {
 	}

 	ctx.Resp.WriteHeader(statusCode)
-	io.Copy(ctx.Resp, content)
-	content.Close()
+	_, _ = io.Copy(ctx.Resp, content)
+	_ = content.Close()
 	logRequest(ctx.Req, statusCode)
 }

@@ -196,7 +196,7 @@ func getMetaHandler(ctx *context.Context) {

 	if ctx.Req.Method == "GET" {
 		enc := json.NewEncoder(ctx.Resp)
-		enc.Encode(Represent(rv, meta, true, false))
+		_ = enc.Encode(Represent(rv, meta, true, false))
 	}

 	logRequest(ctx.Req, 200)
@@ -249,7 +249,7 @@ func PostHandler(ctx *context.Context) {
 	ctx.Resp.WriteHeader(sentStatus)

 	enc := json.NewEncoder(ctx.Resp)
-	enc.Encode(Represent(rv, meta, meta.Existing, true))
+	_ = enc.Encode(Represent(rv, meta, meta.Existing, true))
 	logRequest(ctx.Req, sentStatus)
 }

@@ -313,7 +313,7 @@ func BatchHandler(ctx *context.Context) {
 	respobj := &BatchResponse{Objects: responseObjects}

 	enc := json.NewEncoder(ctx.Resp)
-	enc.Encode(respobj)
+	_ = enc.Encode(respobj)
 	logRequest(ctx.Req, 200)
 }
@@ -208,7 +208,7 @@ normalLoop:

 		if i > lasti {
 			written, err := c.w.Write(bytes[lasti:i])
-			totalWritten = totalWritten + written
+			totalWritten += written
 			if err != nil {
 				return totalWritten, err
 			}
@@ -243,7 +243,7 @@ normalLoop:
 			if bytes[j] == 'm' {
 				if c.mode == allowColor {
 					written, err := c.w.Write(bytes[i : j+1])
-					totalWritten = totalWritten + written
+					totalWritten += written
 					if err != nil {
 						return totalWritten, err
 					}
@@ -278,7 +278,7 @@ func ColorSprintf(format string, args ...interface{}) string {
 		}
 		return fmt.Sprintf(format, v...)
 	}
-	return fmt.Sprintf(format)
+	return format
 }

 // ColorFprintf will write to the provided writer similar to ColorSprintf
@@ -290,7 +290,7 @@ func ColorFprintf(w io.Writer, format string, args ...interface{}) (int, error)
 		}
 		return fmt.Fprintf(w, format, v...)
 	}
-	return fmt.Fprintf(w, format)
+	return fmt.Fprint(w, format)
 }

 // ColorFormatted structs provide their own colored string when formatted with ColorSprintf

@@ -67,7 +67,10 @@ func (i *connWriter) connect() error {
 	}

 	if tcpConn, ok := conn.(*net.TCPConn); ok {
-		tcpConn.SetKeepAlive(true)
+		err = tcpConn.SetKeepAlive(true)
+		if err != nil {
+			return err
+		}
 	}

 	i.innerWriter = conn

@@ -24,7 +24,6 @@ func listenReadAndClose(t *testing.T, l net.Listener, expected string) {

 	assert.NoError(t, err)
 	assert.Equal(t, expected, string(written))
-	return
 }

 func TestConnLogger(t *testing.T) {

@@ -79,7 +79,7 @@ func (l *ChannelledLog) Start() {
 				return
 			}
 			l.loggerProvider.Flush()
-		case _, _ = <-l.close:
+		case <-l.close:
 			l.closeLogger()
 			return
 		}
@@ -104,7 +104,6 @@ func (l *ChannelledLog) closeLogger() {
 	l.loggerProvider.Flush()
 	l.loggerProvider.Close()
 	l.closed <- true
-	return
 }

 // Close this ChannelledLog
@@ -228,7 +227,6 @@ func (m *MultiChannelledLog) closeLoggers() {
 	}
 	m.mutex.Unlock()
 	m.closed <- true
-	return
 }

 // Start processing the MultiChannelledLog

@@ -223,7 +223,7 @@ func compressOldLogFile(fname string, compressionLevel int) error {

 func (log *FileLogger) deleteOldLog() {
 	dir := filepath.Dir(log.Filename)
-	filepath.Walk(dir, func(path string, info os.FileInfo, err error) (returnErr error) {
+	_ = filepath.Walk(dir, func(path string, info os.FileInfo, err error) (returnErr error) {
 		defer func() {
 			if r := recover(); r != nil {
 				returnErr = fmt.Errorf("Unable to delete old log '%s', error: %+v", path, r)
@@ -246,7 +246,7 @@ func (log *FileLogger) deleteOldLog() {
 // there are no buffering messages in file logger in memory.
 // flush file means sync file from disk.
 func (log *FileLogger) Flush() {
-	log.mw.fd.Sync()
+	_ = log.mw.fd.Sync()
 }

 // GetName returns the default name for this implementation

@@ -103,7 +103,7 @@ func TestFileLogger(t *testing.T) {
 	assert.Equal(t, expected, string(logData))

 	event.level = WARN
-	expected = expected + fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
+	expected += fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
 	fileLogger.LogEvent(&event)
 	fileLogger.Flush()
 	logData, err = ioutil.ReadFile(filename)
@@ -130,7 +130,7 @@ func TestFileLogger(t *testing.T) {
 	err = realFileLogger.DoRotate()
 	assert.Error(t, err)

-	expected = expected + fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
+	expected += fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
 	fileLogger.LogEvent(&event)
 	fileLogger.Flush()
 	logData, err = ioutil.ReadFile(filename)
@@ -138,7 +138,7 @@ func TestFileLogger(t *testing.T) {
 	assert.Equal(t, expected, string(logData))

 	// Should fail to rotate
-	expected = expected + fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
+	expected += fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
 	fileLogger.LogEvent(&event)
 	fileLogger.Flush()
 	logData, err = ioutil.ReadFile(filename)
@@ -188,7 +188,7 @@ func TestCompressFileLogger(t *testing.T) {
 	assert.Equal(t, expected, string(logData))

 	event.level = WARN
-	expected = expected + fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
+	expected += fmt.Sprintf("%s%s %s:%d:%s [%c] %s\n", prefix, dateString, event.filename, event.line, event.caller, strings.ToUpper(event.level.String())[0], event.msg)
 	fileLogger.LogEvent(&event)
 	fileLogger.Flush()
 	logData, err = ioutil.ReadFile(filename)

@@ -57,7 +57,7 @@ func FlagsFromString(from string) int {
 	for _, flag := range strings.Split(strings.ToLower(from), ",") {
 		f, ok := flagFromString[strings.TrimSpace(flag)]
 		if ok {
-			flags = flags | f
+			flags |= f
 		}
 	}
 	return flags

@@ -218,7 +218,7 @@ func (l *LoggerAsWriter) Write(p []byte) (int, error) {
 func (l *LoggerAsWriter) Log(msg string) {
 	for _, logger := range l.ourLoggers {
 		// Set the skip to reference the call just above this
-		logger.Log(1, l.level, msg)
+		_ = logger.Log(1, l.level, msg)
 	}
 }

@@ -11,10 +11,6 @@ import (
 	"strings"
 )

-const (
-	subjectPhrase = "Diagnostic message from server"
-)
-
 type smtpWriter struct {
 	owner *SMTPLogger
 }

@@ -252,10 +252,7 @@ func (logger *WriterLogger) Match(event *Event) bool {
 			mode: removeColor,
 		}).Write([]byte(event.msg))
 		msg = baw
-		if logger.regexp.Match(msg) {
-			return true
-		}
-		return false
+		return logger.regexp.Match(msg)
 	}

 // Close the base logger
@@ -258,15 +258,12 @@ func (s *dummySender) Send(from string, to []string, msg io.WriterTo) error {
 }

 func processMailQueue() {
-	for {
-		select {
-		case msg := <-mailQueue:
-			log.Trace("New e-mail sending request %s: %s", msg.GetHeader("To"), msg.Info)
-			if err := gomail.Send(Sender, msg.Message); err != nil {
-				log.Error("Failed to send emails %s: %s - %v", msg.GetHeader("To"), msg.Info, err)
-			} else {
-				log.Trace("E-mails sent %s: %s", msg.GetHeader("To"), msg.Info)
-			}
+	for msg := range mailQueue {
+		log.Trace("New e-mail sending request %s: %s", msg.GetHeader("To"), msg.Info)
+		if err := gomail.Send(Sender, msg.Message); err != nil {
+			log.Error("Failed to send emails %s: %s - %v", msg.GetHeader("To"), msg.Info, err)
+		} else {
+			log.Trace("E-mails sent %s: %s", msg.GetHeader("To"), msg.Info)
 		}
 	}
 }
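Editor's note (not part of the diff): the mail queue above and the notification service below both drop a `for { select { case x := <-ch: ... } }` loop with a single case in favour of `for x := range ch`, the form suggested by gosimple's single-case-select check. Behaviour is the same, and the loop also terminates cleanly once the channel is closed. A compact, runnable sketch:

    package main

    import "fmt"

    func main() {
    	queue := make(chan string, 3)
    	queue <- "a"
    	queue <- "b"
    	close(queue)

    	// Equivalent to: for { select { case msg := <-queue: ... } }
    	// but simpler, and it exits when the channel is closed.
    	for msg := range queue {
    		fmt.Println("processing", msg)
    	}
    }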
@@ -108,24 +108,6 @@ func FindAllMentions(content string) []string {
 	return ret
 }

-// cutoutVerbosePrefix cutouts URL prefix including sub-path to
-// return a clean unified string of request URL path.
-func cutoutVerbosePrefix(prefix string) string {
-	if len(prefix) == 0 || prefix[0] != '/' {
-		return prefix
-	}
-	count := 0
-	for i := 0; i < len(prefix); i++ {
-		if prefix[i] == '/' {
-			count++
-		}
-		if count >= 3+setting.AppSubURLDepth {
-			return prefix[:i]
-		}
-	}
-	return prefix
-}
-
 // IsSameDomain checks if given url string has the same hostname as current Gitea instance
 func IsSameDomain(s string) bool {
 	if strings.HasPrefix(s, "/") {
@@ -146,7 +128,7 @@ type postProcessError struct {
 }

 func (p *postProcessError) Error() string {
-	return "PostProcess: " + p.context + ", " + p.Error()
+	return "PostProcess: " + p.context + ", " + p.err.Error()
 }

 type processor func(ctx *postProcessCtx, node *html.Node)
@@ -304,20 +286,6 @@ func (ctx *postProcessCtx) visitNode(node *html.Node) {
 	// ignore everything else
 }

-func (ctx *postProcessCtx) visitNodeForShortLinks(node *html.Node) {
-	switch node.Type {
-	case html.TextNode:
-		shortLinkProcessorFull(ctx, node, true)
-	case html.ElementNode:
-		if node.Data == "code" || node.Data == "pre" || node.Data == "a" {
-			return
-		}
-		for n := node.FirstChild; n != nil; n = n.NextSibling {
-			ctx.visitNodeForShortLinks(n)
-		}
-	}
-}
-
 // textNode runs the passed node through various processors, in order to handle
 // all kinds of special links handled by the post-processing.
 func (ctx *postProcessCtx) textNode(node *html.Node) {

@@ -29,11 +29,6 @@ func numericIssueLink(baseURL string, index int) string {
 	return link(util.URLJoin(baseURL, strconv.Itoa(index)), fmt.Sprintf("#%d", index))
 }

-// urlContentsLink an HTML link whose contents is the target URL
-func urlContentsLink(href string) string {
-	return link(href, href)
-}
-
 // link an HTML link
 func link(href, contents string) string {
 	return fmt.Sprintf("<a href=\"%s\">%s</a>", href, contents)

@@ -35,12 +35,9 @@ func NewNotifier() base.Notifier {
 }

 func (ns *notificationService) Run() {
-	for {
-		select {
-		case opts := <-ns.issueQueue:
-			if err := models.CreateOrUpdateIssueNotifications(opts.issue, opts.notificationAuthorID); err != nil {
-				log.Error("Was unable to create issue notification: %v", err)
-			}
+	for opts := range ns.issueQueue {
+		if err := models.CreateOrUpdateIssueNotifications(opts.issue, opts.notificationAuthorID); err != nil {
+			log.Error("Was unable to create issue notification: %v", err)
 		}
 	}
 }
Some files were not shown because too many files have changed in this diff.