2019-12-07 05:44:10 +03:00
|
|
|
// Copyright 2019 The Gitea Authors.
|
|
|
|
// All rights reserved.
|
2022-11-27 21:20:29 +03:00
|
|
|
// SPDX-License-Identifier: MIT
|
2019-12-07 05:44:10 +03:00
|
|
|
|
|
|
|
package pull
|
|
|
|
|
|
|
|
import (
|
2019-12-15 12:51:28 +03:00
|
|
|
"context"
|
2022-03-31 17:53:08 +03:00
|
|
|
"errors"
|
2019-12-07 05:44:10 +03:00
|
|
|
"fmt"
|
2020-02-03 02:19:58 +03:00
|
|
|
"strconv"
|
2019-12-07 05:44:10 +03:00
|
|
|
"strings"
|
|
|
|
|
|
|
|
"code.gitea.io/gitea/models"
|
2022-05-03 22:46:28 +03:00
|
|
|
"code.gitea.io/gitea/models/db"
|
2023-01-16 11:00:22 +03:00
|
|
|
git_model "code.gitea.io/gitea/models/git"
|
2022-06-13 12:37:59 +03:00
|
|
|
issues_model "code.gitea.io/gitea/models/issues"
|
2022-05-11 13:09:36 +03:00
|
|
|
access_model "code.gitea.io/gitea/models/perm/access"
|
2021-12-10 04:27:50 +03:00
|
|
|
repo_model "code.gitea.io/gitea/models/repo"
|
2021-11-09 22:57:58 +03:00
|
|
|
"code.gitea.io/gitea/models/unit"
|
2021-11-24 12:49:20 +03:00
|
|
|
user_model "code.gitea.io/gitea/models/user"
|
2019-12-07 05:44:10 +03:00
|
|
|
"code.gitea.io/gitea/modules/git"
|
2019-12-15 12:51:28 +03:00
|
|
|
"code.gitea.io/gitea/modules/graceful"
|
2019-12-07 05:44:10 +03:00
|
|
|
"code.gitea.io/gitea/modules/log"
|
2019-12-16 00:57:34 +03:00
|
|
|
"code.gitea.io/gitea/modules/notification"
|
2022-01-20 02:26:57 +03:00
|
|
|
"code.gitea.io/gitea/modules/process"
|
2020-02-03 02:19:58 +03:00
|
|
|
"code.gitea.io/gitea/modules/queue"
|
2019-12-07 05:44:10 +03:00
|
|
|
"code.gitea.io/gitea/modules/timeutil"
|
2022-03-31 17:53:08 +03:00
|
|
|
asymkey_service "code.gitea.io/gitea/services/asymkey"
|
2019-12-07 05:44:10 +03:00
|
|
|
)
|
|
|
|
|
2022-05-02 02:54:44 +03:00
|
|
|
// prPatchCheckerQueue is the unique queue on which pull requests are scheduled
// to have their patches tested and their mergeability status recomputed.
// Entries are keyed by the pull request ID (decimal string), so a PR that is
// already queued is not queued twice.
var prPatchCheckerQueue queue.UniqueQueue
|
2019-12-07 05:44:10 +03:00
|
|
|
|
2022-03-31 17:53:08 +03:00
|
|
|
// Sentinel errors returned by the mergeability checks below to signal the
// specific reason a pull request cannot be merged right now.
var (
	// ErrIsClosed is returned when the pull request's issue is closed.
	ErrIsClosed = errors.New("pull is closed")
	// ErrUserNotAllowedToMerge is returned when the doer lacks merge permission.
	ErrUserNotAllowedToMerge = models.ErrDisallowedToMerge{}
	// ErrHasMerged is returned when the pull request has already been merged.
	ErrHasMerged = errors.New("has already been merged")
	// ErrIsWorkInProgress is returned when the pull request title carries a WIP marker.
	ErrIsWorkInProgress = errors.New("work in progress PRs cannot be merged")
	// ErrIsChecking is returned while a conflict check for the pull request is still running.
	ErrIsChecking = errors.New("cannot merge while conflict checking is in progress")
	// ErrNotMergableState is returned when the pull request is conflicted or otherwise unmergeable.
	ErrNotMergableState = errors.New("not in mergeable state")
	// ErrDependenciesLeft is returned when the pull request's issue still has open dependencies.
	ErrDependenciesLeft = errors.New("is blocked by an open dependency")
)
|
|
|
|
|
2019-12-07 05:44:10 +03:00
|
|
|
// AddToTaskQueue adds itself to pull request test task queue.
|
2022-06-13 12:37:59 +03:00
|
|
|
func AddToTaskQueue(pr *issues_model.PullRequest) {
|
2022-05-02 02:54:44 +03:00
|
|
|
err := prPatchCheckerQueue.PushFunc(strconv.FormatInt(pr.ID, 10), func() error {
|
2022-06-13 12:37:59 +03:00
|
|
|
pr.Status = issues_model.PullRequestStatusChecking
|
2022-11-19 11:12:33 +03:00
|
|
|
err := pr.UpdateColsIfNotMerged(db.DefaultContext, "status")
|
2021-06-09 22:52:55 +03:00
|
|
|
if err != nil {
|
2023-02-04 02:11:48 +03:00
|
|
|
log.Error("AddToTaskQueue(%-v).UpdateCols.(add to queue): %v", pr, err)
|
2021-06-09 22:52:55 +03:00
|
|
|
} else {
|
2023-02-04 02:11:48 +03:00
|
|
|
log.Trace("Adding %-v to the test pull requests queue", pr)
|
2019-12-07 05:44:10 +03:00
|
|
|
}
|
2021-06-09 22:52:55 +03:00
|
|
|
return err
|
|
|
|
})
|
|
|
|
if err != nil && err != queue.ErrAlreadyInQueue {
|
2023-02-04 02:11:48 +03:00
|
|
|
log.Error("Error adding %-v to the test pull requests queue: %v", pr, err)
|
2021-06-09 22:52:55 +03:00
|
|
|
}
|
2019-12-07 05:44:10 +03:00
|
|
|
}
|
|
|
|
|
2022-03-31 17:53:08 +03:00
|
|
|
// CheckPullMergable check if the pull mergable based on all conditions (branch protection, merge options, ...)
//
// All checks run inside one transaction so they observe a consistent database
// state. The checks, in order: already merged, issue closed, doer merge
// permission, WIP marker, conflict/empty state, in-progress conflict checking,
// branch protections (repo admins may override a protection failure when
// force is set), signed-commit requirement, and open issue dependencies.
// A nil return means the pull request may be merged. When manuallMerge is
// true only the first three checks apply, because the doer is going to mark
// the pull as merged manually rather than perform a real merge.
func CheckPullMergable(stdCtx context.Context, doer *user_model.User, perm *access_model.Permission, pr *issues_model.PullRequest, manuallMerge, force bool) error {
	return db.WithTx(stdCtx, func(ctx context.Context) error {
		if pr.HasMerged {
			return ErrHasMerged
		}

		if err := pr.LoadIssue(ctx); err != nil {
			log.Error("Unable to load issue[%d] for %-v: %v", pr.IssueID, pr, err)
			return err
		} else if pr.Issue.IsClosed {
			return ErrIsClosed
		}

		if allowedMerge, err := IsUserAllowedToMerge(ctx, pr, *perm, doer); err != nil {
			log.Error("Error whilst checking if %-v is allowed to merge %-v: %v", doer, pr, err)
			return err
		} else if !allowedMerge {
			return ErrUserNotAllowedToMerge
		}

		if manuallMerge {
			// don't check rules to "auto merge", doer is going to mark this pull as merged manually
			return nil
		}

		if pr.IsWorkInProgress() {
			return ErrIsWorkInProgress
		}

		// A conflicted PR is unmergeable; an empty PR (no changes) is let through.
		if !pr.CanAutoMerge() && !pr.IsEmpty() {
			return ErrNotMergableState
		}

		if pr.IsChecking() {
			return ErrIsChecking
		}

		if err := CheckPullBranchProtections(ctx, pr, false); err != nil {
			if !models.IsErrDisallowedToMerge(err) {
				log.Error("Error whilst checking pull branch protection for %-v: %v", pr, err)
				return err
			}

			// Protection rules block the merge; only a repo admin with the
			// force flag set may bypass them.
			if !force {
				return err
			}

			if isRepoAdmin, err2 := access_model.IsUserRepoAdmin(ctx, pr.BaseRepo, doer); err2 != nil {
				log.Error("Unable to check if %-v is a repo admin in %-v: %v", doer, pr.BaseRepo, err2)
				return err2
			} else if !isRepoAdmin {
				// not an admin: surface the original protection error
				return err
			}
		}

		// If the base branch requires signed commits, verify the merge would be signed.
		if _, err := isSignedIfRequired(ctx, pr, doer); err != nil {
			return err
		}

		if noDeps, err := issues_model.IssueNoDependenciesLeft(ctx, pr.Issue); err != nil {
			return err
		} else if !noDeps {
			return ErrDependenciesLeft
		}

		return nil
	})
}
|
|
|
|
|
|
|
|
// isSignedIfRequired check if merge will be signed if required
|
2022-06-13 12:37:59 +03:00
|
|
|
func isSignedIfRequired(ctx context.Context, pr *issues_model.PullRequest, doer *user_model.User) (bool, error) {
|
2023-01-16 11:00:22 +03:00
|
|
|
pb, err := git_model.GetFirstMatchProtectedBranchRule(ctx, pr.BaseRepoID, pr.BaseBranch)
|
|
|
|
if err != nil {
|
2022-03-31 17:53:08 +03:00
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
|
2023-01-16 11:00:22 +03:00
|
|
|
if pb == nil || !pb.RequireSignedCommits {
|
2022-03-31 17:53:08 +03:00
|
|
|
return true, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
sign, _, _, err := asymkey_service.SignMerge(ctx, pr, doer, pr.BaseRepo.RepoPath(), pr.BaseBranch, pr.GetGitRefName())
|
|
|
|
|
|
|
|
return sign, err
|
|
|
|
}
|
|
|
|
|
2019-12-07 05:44:10 +03:00
|
|
|
// checkAndUpdateStatus checks if pull request is possible to leaving checking status,
|
|
|
|
// and set to be either conflict or mergeable.
|
2022-11-19 11:12:33 +03:00
|
|
|
func checkAndUpdateStatus(ctx context.Context, pr *issues_model.PullRequest) {
|
2023-02-04 02:11:48 +03:00
|
|
|
// If status has not been changed to conflict by testPatch then we are mergeable
|
2022-06-13 12:37:59 +03:00
|
|
|
if pr.Status == issues_model.PullRequestStatusChecking {
|
|
|
|
pr.Status = issues_model.PullRequestStatusMergeable
|
2019-12-07 05:44:10 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// Make sure there is no waiting test to process before leaving the checking status.
|
2022-05-02 02:54:44 +03:00
|
|
|
has, err := prPatchCheckerQueue.Has(strconv.FormatInt(pr.ID, 10))
|
2020-02-03 02:19:58 +03:00
|
|
|
if err != nil {
|
2023-02-04 02:11:48 +03:00
|
|
|
log.Error("Unable to check if the queue is waiting to reprocess %-v. Error: %v", pr, err)
|
2020-02-03 02:19:58 +03:00
|
|
|
}
|
|
|
|
|
2023-02-04 02:11:48 +03:00
|
|
|
if has {
|
|
|
|
log.Trace("Not updating status for %-v as it is due to be rechecked", pr)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if err := pr.UpdateColsIfNotMerged(ctx, "merge_base", "status", "conflicted_files", "changed_protected_files"); err != nil {
|
|
|
|
log.Error("Update[%-v]: %v", pr, err)
|
2019-12-07 05:44:10 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2023-02-04 02:11:48 +03:00
|
|
|
// getMergeCommit checks if a pull request has been merged
// Returns the git.Commit of the pull request if merged, or (nil, nil) when
// the PR's head is not (yet) contained in the base branch.
func getMergeCommit(ctx context.Context, pr *issues_model.PullRequest) (*git.Commit, error) {
	if err := pr.LoadBaseRepo(ctx); err != nil {
		return nil, fmt.Errorf("unable to load base repo for %s: %w", pr, err)
	}

	prHeadRef := pr.GetGitRefName()

	// Check if the pull request is merged into BaseBranch:
	// `merge-base --is-ancestor` exits 0 when prHeadRef is an ancestor.
	if _, _, err := git.NewCommand(ctx, "merge-base", "--is-ancestor").
		AddDynamicArguments(prHeadRef, pr.BaseBranch).
		RunStdString(&git.RunOpts{Dir: pr.BaseRepo.RepoPath()}); err != nil {
		if strings.Contains(err.Error(), "exit status 1") {
			// prHeadRef is not an ancestor of the base branch => not merged
			return nil, nil
		}
		// Errors are signaled by a non-zero status that is not 1
		return nil, fmt.Errorf("%-v git merge-base --is-ancestor: %w", pr, err)
	}

	// If merge-base successfully exits then prHeadRef is an ancestor of pr.BaseBranch

	// Find the head commit id
	prHeadCommitID, err := git.GetFullCommitID(ctx, pr.BaseRepo.RepoPath(), prHeadRef)
	if err != nil {
		return nil, fmt.Errorf("GetFullCommitID(%s) in %s: %w", prHeadRef, pr.BaseRepo.FullName(), err)
	}

	// Get the commit from BaseBranch where the pull request got merged:
	// with --ancestry-path --merges --reverse the first line of output is the
	// earliest merge commit on the path from the PR head to the base branch.
	mergeCommit, _, err := git.NewCommand(ctx, "rev-list", "--ancestry-path", "--merges", "--reverse").
		AddDynamicArguments(prHeadCommitID + ".." + pr.BaseBranch).
		RunStdString(&git.RunOpts{Dir: pr.BaseRepo.RepoPath()})
	if err != nil {
		return nil, fmt.Errorf("git rev-list --ancestry-path --merges --reverse: %w", err)
	} else if len(mergeCommit) < git.SHAFullLength {
		// Output shorter than a full SHA means no merge commit was found:
		// PR was maybe fast-forwarded, so just use last commit of PR
		mergeCommit = prHeadCommitID
	}
	// rev-list output carries a trailing newline
	mergeCommit = strings.TrimSpace(mergeCommit)

	gitRepo, err := git.OpenRepository(ctx, pr.BaseRepo.RepoPath())
	if err != nil {
		return nil, fmt.Errorf("%-v OpenRepository: %w", pr.BaseRepo, err)
	}
	defer gitRepo.Close()

	commit, err := gitRepo.GetCommit(mergeCommit)
	if err != nil {
		return nil, fmt.Errorf("GetMergeCommit[%s]: %w", mergeCommit, err)
	}

	return commit, nil
}
|
|
|
|
|
|
|
|
// manuallyMerged checks if a pull request got manually merged
// When a pull request got manually merged mark the pull request as merged
// (status, merge commit, merge time, merger) and send the merge notification.
// Returns true only when the PR was detected as merged AND the database
// record was updated; all failures are logged and reported as false.
func manuallyMerged(ctx context.Context, pr *issues_model.PullRequest) bool {
	if err := pr.LoadBaseRepo(ctx); err != nil {
		log.Error("%-v LoadBaseRepo: %v", pr, err)
		return false
	}

	// Only proceed when the repository's pull-request unit has auto-detection
	// of manual merges enabled.
	if unit, err := pr.BaseRepo.GetUnit(ctx, unit.TypePullRequests); err == nil {
		config := unit.PullRequestsConfig()
		if !config.AutodetectManualMerge {
			return false
		}
	} else {
		log.Error("%-v BaseRepo.GetUnit(unit.TypePullRequests): %v", pr, err)
		return false
	}

	commit, err := getMergeCommit(ctx, pr)
	if err != nil {
		log.Error("%-v getMergeCommit: %v", pr, err)
		return false
	}

	if commit == nil {
		// no merge commit found
		return false
	}

	pr.MergedCommitID = commit.ID.String()
	pr.MergedUnix = timeutil.TimeStamp(commit.Author.When.Unix())
	pr.Status = issues_model.PullRequestStatusManuallyMerged
	// Best-effort lookup of the merging user by the commit author's email;
	// a failed lookup simply leaves merger nil.
	merger, _ := user_model.GetUserByEmail(ctx, commit.Author.Email)

	// When the commit author is unknown set the BaseRepo owner as merger
	if merger == nil {
		if pr.BaseRepo.Owner == nil {
			if err = pr.BaseRepo.GetOwner(ctx); err != nil {
				log.Error("%-v BaseRepo.GetOwner: %v", pr, err)
				return false
			}
		}
		merger = pr.BaseRepo.Owner
	}
	pr.Merger = merger
	pr.MergerID = merger.ID

	// SetMerged persists the merged state; !merged means another process
	// already marked the PR merged in the meantime.
	if merged, err := pr.SetMerged(ctx); err != nil {
		log.Error("%-v setMerged : %v", pr, err)
		return false
	} else if !merged {
		return false
	}

	notification.NotifyMergePullRequest(ctx, merger, pr)

	log.Info("manuallyMerged[%-v]: Marked as manually merged into %s/%s by commit id: %s", pr, pr.BaseRepo.Name, pr.BaseBranch, commit.ID.String())
	return true
}
|
|
|
|
|
2020-02-03 02:19:58 +03:00
|
|
|
// InitializePullRequests checks and tests untested patches of pull requests.
|
|
|
|
func InitializePullRequests(ctx context.Context) {
|
2022-06-13 12:37:59 +03:00
|
|
|
prs, err := issues_model.GetPullRequestIDsByCheckStatus(issues_model.PullRequestStatusChecking)
|
2020-02-03 02:19:58 +03:00
|
|
|
if err != nil {
|
|
|
|
log.Error("Find Checking PRs: %v", err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
for _, prID := range prs {
|
|
|
|
select {
|
|
|
|
case <-ctx.Done():
|
2019-12-15 12:51:28 +03:00
|
|
|
return
|
2020-02-03 02:19:58 +03:00
|
|
|
default:
|
2022-05-02 02:54:44 +03:00
|
|
|
if err := prPatchCheckerQueue.PushFunc(strconv.FormatInt(prID, 10), func() error {
|
2023-02-04 02:11:48 +03:00
|
|
|
log.Trace("Adding PR[%d] to the pull requests patch checking queue", prID)
|
2020-02-03 02:19:58 +03:00
|
|
|
return nil
|
|
|
|
}); err != nil {
|
2023-02-04 02:11:48 +03:00
|
|
|
log.Error("Error adding PR[%d] to the pull requests patch checking queue %v", prID, err)
|
2019-12-15 12:51:28 +03:00
|
|
|
}
|
2019-12-07 05:44:10 +03:00
|
|
|
}
|
2020-02-03 02:19:58 +03:00
|
|
|
}
|
|
|
|
}
|
2019-12-07 05:44:10 +03:00
|
|
|
|
2020-02-03 02:19:58 +03:00
|
|
|
// handle passed PR IDs and test the PRs
|
2022-01-23 00:22:14 +03:00
|
|
|
func handle(data ...queue.Data) []queue.Data {
|
2020-02-03 02:19:58 +03:00
|
|
|
for _, datum := range data {
|
2020-12-25 12:59:32 +03:00
|
|
|
id, _ := strconv.ParseInt(datum.(string), 10, 64)
|
2019-12-15 12:51:28 +03:00
|
|
|
|
2022-01-20 02:26:57 +03:00
|
|
|
testPR(id)
|
|
|
|
}
|
2022-01-23 00:22:14 +03:00
|
|
|
return nil
|
2022-01-20 02:26:57 +03:00
|
|
|
}
|
2019-12-15 12:51:28 +03:00
|
|
|
|
2022-01-20 02:26:57 +03:00
|
|
|
// testPR runs the full mergeability test for a single pull request: it takes
// the per-PR working-pool slot (serializing against concurrent merges of the
// same PR), skips already/manually merged PRs, tests the patch for conflicts
// and persists the resulting status.
func testPR(id int64) {
	// Serialize with other operations (e.g. merge) on the same PR.
	pullWorkingPool.CheckIn(fmt.Sprint(id))
	defer pullWorkingPool.CheckOut(fmt.Sprint(id))
	// Run under the hammer context so a hard shutdown can cancel the test;
	// register it with the process manager for visibility.
	ctx, _, finished := process.GetManager().AddContext(graceful.GetManager().HammerContext(), fmt.Sprintf("Test PR[%d] from patch checking queue", id))
	defer finished()

	pr, err := issues_model.GetPullRequestByID(ctx, id)
	if err != nil {
		log.Error("Unable to GetPullRequestByID[%d] for testPR: %v", id, err)
		return
	}

	log.Trace("Testing %-v", pr)
	defer func() {
		log.Trace("Done testing %-v (status: %s)", pr, pr.Status)
	}()

	if pr.HasMerged {
		log.Trace("%-v is already merged (status: %s, merge commit: %s)", pr, pr.Status, pr.MergedCommitID)
		return
	}

	// Detect (and record) a manual merge before bothering to test the patch.
	if manuallyMerged(ctx, pr) {
		log.Trace("%-v is manually merged (status: %s, merge commit: %s)", pr, pr.Status, pr.MergedCommitID)
		return
	}

	if err := TestPatch(pr); err != nil {
		log.Error("testPatch[%-v]: %v", pr, err)
		pr.Status = issues_model.PullRequestStatusError
		if err := pr.UpdateCols("status"); err != nil {
			log.Error("update pr [%-v] status to PullRequestStatusError failed: %v", pr, err)
		}
		return
	}
	// TestPatch set merge_base/conflicted state on pr; persist the outcome.
	checkAndUpdateStatus(ctx, pr)
}
|
|
|
|
|
2023-01-16 11:00:22 +03:00
|
|
|
// CheckPRsForBaseBranch check all pulls with baseBrannch
|
|
|
|
func CheckPRsForBaseBranch(baseRepo *repo_model.Repository, baseBranchName string) error {
|
2022-06-13 12:37:59 +03:00
|
|
|
prs, err := issues_model.GetUnmergedPullRequestsByBaseInfo(baseRepo.ID, baseBranchName)
|
2020-10-13 21:50:57 +03:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, pr := range prs {
|
|
|
|
AddToTaskQueue(pr)
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-12-07 05:44:10 +03:00
|
|
|
// Init runs the task queue to test all the checking status pull requests
|
2020-02-03 02:19:58 +03:00
|
|
|
func Init() error {
|
2022-05-02 02:54:44 +03:00
|
|
|
prPatchCheckerQueue = queue.CreateUniqueQueue("pr_patch_checker", handle, "")
|
2020-02-03 02:19:58 +03:00
|
|
|
|
2022-05-02 02:54:44 +03:00
|
|
|
if prPatchCheckerQueue == nil {
|
2020-02-03 02:19:58 +03:00
|
|
|
return fmt.Errorf("Unable to create pr_patch_checker Queue")
|
|
|
|
}
|
|
|
|
|
2022-05-02 02:54:44 +03:00
|
|
|
go graceful.GetManager().RunWithShutdownFns(prPatchCheckerQueue.Run)
|
2020-02-03 02:19:58 +03:00
|
|
|
go graceful.GetManager().RunWithShutdownContext(InitializePullRequests)
|
|
|
|
return nil
|
2019-12-07 05:44:10 +03:00
|
|
|
}
|