summaryrefslogtreecommitdiffstats
path: root/models/git
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-10-11 10:27:00 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-10-11 10:27:00 +0000
commit65aa53fc52ff15efe54df4147564828d535837f8 (patch)
tree31c51dad04fdcca80e6d3043c8bd49d2f1a51f83 /models/git
parentInitial commit. (diff)
downloadforgejo-65aa53fc52ff15efe54df4147564828d535837f8.tar.xz
forgejo-65aa53fc52ff15efe54df4147564828d535837f8.zip
Adding upstream version 8.0.3.HEADupstream/8.0.3upstreamdebian
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'models/git')
-rw-r--r--models/git/TestIterateRepositoryIDsWithLFSMetaObjects/lfs_meta_object.yaml7
-rw-r--r--models/git/branch.go427
-rw-r--r--models/git/branch_list.go132
-rw-r--r--models/git/branch_test.go195
-rw-r--r--models/git/commit_status.go473
-rw-r--r--models/git/commit_status_summary.go88
-rw-r--r--models/git/commit_status_test.go242
-rw-r--r--models/git/lfs.go419
-rw-r--r--models/git/lfs_lock.go209
-rw-r--r--models/git/lfs_lock_list.go54
-rw-r--r--models/git/lfs_test.go102
-rw-r--r--models/git/main_test.go18
-rw-r--r--models/git/protected_banch_list_test.go77
-rw-r--r--models/git/protected_branch.go511
-rw-r--r--models/git/protected_branch_list.go95
-rw-r--r--models/git/protected_branch_test.go78
-rw-r--r--models/git/protected_tag.go150
-rw-r--r--models/git/protected_tag_test.go166
18 files changed, 3443 insertions, 0 deletions
diff --git a/models/git/TestIterateRepositoryIDsWithLFSMetaObjects/lfs_meta_object.yaml b/models/git/TestIterateRepositoryIDsWithLFSMetaObjects/lfs_meta_object.yaml
new file mode 100644
index 00000000..fdfa66a2
--- /dev/null
+++ b/models/git/TestIterateRepositoryIDsWithLFSMetaObjects/lfs_meta_object.yaml
@@ -0,0 +1,7 @@
+-
+
+ id: 1000
+ oid: 9d172e5c64b4f0024b9901ec6afe9ea052f3c9b6ff9f4b07956d8c48c86fca82
+ size: 25
+ repository_id: 1
+ created_unix: 1712309123
diff --git a/models/git/branch.go b/models/git/branch.go
new file mode 100644
index 00000000..7e1c96d7
--- /dev/null
+++ b/models/git/branch.go
@@ -0,0 +1,427 @@
+// Copyright 2016 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ repo_model "code.gitea.io/gitea/models/repo"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
+// ErrBranchNotExist represents an error that branch with such name does not exist.
+type ErrBranchNotExist struct {
+	RepoID     int64
+	BranchName string
+}
+
+// IsErrBranchNotExist checks if an error is an ErrBranchNotExist.
+func IsErrBranchNotExist(err error) bool {
+	_, ok := err.(ErrBranchNotExist)
+	return ok
+}
+
+// Error implements the error interface.
+func (err ErrBranchNotExist) Error() string {
+	return fmt.Sprintf("branch does not exist [repo_id: %d name: %s]", err.RepoID, err.BranchName)
+}
+
+// Unwrap maps this error to util.ErrNotExist so errors.Is checks work.
+func (err ErrBranchNotExist) Unwrap() error {
+	return util.ErrNotExist
+}
+
+// ErrBranchAlreadyExists represents an error that branch with such name already exists.
+type ErrBranchAlreadyExists struct {
+	BranchName string
+}
+
+// IsErrBranchAlreadyExists checks if an error is an ErrBranchAlreadyExists.
+func IsErrBranchAlreadyExists(err error) bool {
+	_, ok := err.(ErrBranchAlreadyExists)
+	return ok
+}
+
+// Error implements the error interface.
+func (err ErrBranchAlreadyExists) Error() string {
+	return fmt.Sprintf("branch already exists [name: %s]", err.BranchName)
+}
+
+// Unwrap maps this error to util.ErrAlreadyExist so errors.Is checks work.
+func (err ErrBranchAlreadyExists) Unwrap() error {
+	return util.ErrAlreadyExist
+}
+
+// ErrBranchNameConflict represents an error that branch name conflicts with another
+// existing branch (e.g. the same ref path prefix).
+type ErrBranchNameConflict struct {
+	BranchName string
+}
+
+// IsErrBranchNameConflict checks if an error is an ErrBranchNameConflict.
+func IsErrBranchNameConflict(err error) bool {
+	_, ok := err.(ErrBranchNameConflict)
+	return ok
+}
+
+// Error implements the error interface.
+func (err ErrBranchNameConflict) Error() string {
+	return fmt.Sprintf("branch conflicts with existing branch [name: %s]", err.BranchName)
+}
+
+// Unwrap maps this error to util.ErrAlreadyExist so errors.Is checks work.
+func (err ErrBranchNameConflict) Unwrap() error {
+	return util.ErrAlreadyExist
+}
+
+// ErrBranchesEqual represents an error that base branch is equal to the head branch.
+type ErrBranchesEqual struct {
+	BaseBranchName string
+	HeadBranchName string
+}
+
+// IsErrBranchesEqual checks if an error is an ErrBranchesEqual.
+func IsErrBranchesEqual(err error) bool {
+	_, ok := err.(ErrBranchesEqual)
+	return ok
+}
+
+// Error implements the error interface.
+// Note: the original format string contained a typo ("%sm base") which rendered
+// e.g. "[head: mainm base: master]"; fixed to a comma separator.
+func (err ErrBranchesEqual) Error() string {
+	return fmt.Sprintf("branches are equal [head: %s, base: %s]", err.HeadBranchName, err.BaseBranchName)
+}
+
+// Unwrap maps this error to util.ErrInvalidArgument so errors.Is checks work.
+func (err ErrBranchesEqual) Unwrap() error {
+	return util.ErrInvalidArgument
+}
+
+// Branch represents a branch of a repository
+// For those repository who have many branches, stored into database is a good choice
+// for pagination, keyword search and filtering
+type Branch struct {
+	ID            int64
+	RepoID        int64  `xorm:"UNIQUE(s)"`
+	Name          string `xorm:"UNIQUE(s) NOT NULL"` // git's ref-name is case-sensitive internally, however, in some databases (mysql, by default), it's case-insensitive at the moment
+	CommitID      string
+	CommitMessage string `xorm:"TEXT"` // it only stores the message summary (the first line)
+	PusherID      int64
+	Pusher        *user_model.User `xorm:"-"` // lazily loaded via LoadPusher
+	IsDeleted     bool             `xorm:"index"`
+	DeletedByID   int64
+	DeletedBy     *user_model.User   `xorm:"-"` // lazily loaded via LoadDeletedBy
+	DeletedUnix   timeutil.TimeStamp `xorm:"index"`
+	CommitTime    timeutil.TimeStamp // the commit time of the latest commit on this branch
+	CreatedUnix   timeutil.TimeStamp `xorm:"created"`
+	UpdatedUnix   timeutil.TimeStamp `xorm:"updated"`
+}
+
+// LoadDeletedBy loads the user who deleted this branch into b.DeletedBy.
+// A missing user record is substituted with the Ghost user instead of returning an error.
+// No-op when b.DeletedBy is already populated.
+func (b *Branch) LoadDeletedBy(ctx context.Context) (err error) {
+	if b.DeletedBy == nil {
+		b.DeletedBy, err = user_model.GetUserByID(ctx, b.DeletedByID)
+		if user_model.IsErrUserNotExist(err) {
+			b.DeletedBy = user_model.NewGhostUser()
+			err = nil
+		}
+	}
+	return err
+}
+
+// GetRepo returns the repository this branch belongs to.
+func (b *Branch) GetRepo(ctx context.Context) (*repo_model.Repository, error) {
+	return repo_model.GetRepositoryByID(ctx, b.RepoID)
+}
+
+// LoadPusher loads the user who last pushed to this branch into b.Pusher.
+// A missing user record is substituted with the Ghost user instead of returning an error.
+// No-op when b.Pusher is already populated or PusherID is not set.
+func (b *Branch) LoadPusher(ctx context.Context) (err error) {
+	if b.Pusher == nil && b.PusherID > 0 {
+		// PusherID may be zero because some branches are synced by the backend with no pusher.
+		b.Pusher, err = user_model.GetUserByID(ctx, b.PusherID)
+		if user_model.IsErrUserNotExist(err) {
+			b.Pusher = user_model.NewGhostUser()
+			err = nil
+		}
+	}
+	return err
+}
+
+// init registers the branch-related models with the database engine.
+func init() {
+	db.RegisterModel(new(Branch))
+	db.RegisterModel(new(RenamedBranch))
+}
+
+// GetBranch returns the branch with the given name in the repository.
+// It returns ErrBranchNotExist when no matching row is found.
+func GetBranch(ctx context.Context, repoID int64, branchName string) (*Branch, error) {
+	var branch Branch
+	has, err := db.GetEngine(ctx).Where("repo_id=?", repoID).And("name=?", branchName).Get(&branch)
+	if err != nil {
+		return nil, err
+	} else if !has {
+		return nil, ErrBranchNotExist{
+			RepoID:     repoID,
+			BranchName: branchName,
+		}
+	}
+	return &branch, nil
+}
+
+// GetBranches returns the branches of the repository whose names are in branchNames.
+// Names with no matching row are silently absent from the result.
+func GetBranches(ctx context.Context, repoID int64, branchNames []string) ([]*Branch, error) {
+	branches := make([]*Branch, 0, len(branchNames))
+	return branches, db.GetEngine(ctx).Where("repo_id=?", repoID).In("name", branchNames).Find(&branches)
+}
+
+// AddBranches inserts the given branches into the database one by one,
+// stopping at the first insert error.
+func AddBranches(ctx context.Context, branches []*Branch) error {
+	for _, branch := range branches {
+		if _, err := db.GetEngine(ctx).Insert(branch); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// GetDeletedBranchByID returns the deleted branch with the given ID, verifying that
+// it belongs to the given repository and is actually marked deleted.
+// All failure modes return ErrBranchNotExist (the branch name is intentionally not leaked).
+func GetDeletedBranchByID(ctx context.Context, repoID, branchID int64) (*Branch, error) {
+	var branch Branch
+	has, err := db.GetEngine(ctx).ID(branchID).Get(&branch)
+	if err != nil {
+		return nil, err
+	} else if !has {
+		return nil, ErrBranchNotExist{
+			RepoID: repoID,
+		}
+	}
+	// Reject branches belonging to a different repository.
+	if branch.RepoID != repoID {
+		return nil, ErrBranchNotExist{
+			RepoID: repoID,
+		}
+	}
+	// Reject branches that are still live.
+	if !branch.IsDeleted {
+		return nil, ErrBranchNotExist{
+			RepoID: repoID,
+		}
+	}
+	return &branch, nil
+}
+
+// DeleteBranches marks the branches with the given IDs as deleted by doerID,
+// inside a single transaction. Rows are soft-deleted via AddDeletedBranch, not removed.
+func DeleteBranches(ctx context.Context, repoID, doerID int64, branchIDs []int64) error {
+	return db.WithTx(ctx, func(ctx context.Context) error {
+		branches := make([]*Branch, 0, len(branchIDs))
+		if err := db.GetEngine(ctx).In("id", branchIDs).Find(&branches); err != nil {
+			return err
+		}
+		for _, branch := range branches {
+			if err := AddDeletedBranch(ctx, repoID, branch.Name, doerID); err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+}
+
+// UpdateBranch updates the branch information in the database after a push:
+// latest commit, summary message, pusher and commit time. It also clears the
+// is_deleted flag, effectively restoring a soft-deleted branch that is pushed to again.
+// Returns the number of affected rows.
+func UpdateBranch(ctx context.Context, repoID, pusherID int64, branchName string, commit *git.Commit) (int64, error) {
+	return db.GetEngine(ctx).Where("repo_id=? AND name=?", repoID, branchName).
+		// Cols is needed so the zero-valued is_deleted=false is written too.
+		Cols("commit_id, commit_message, pusher_id, commit_time, is_deleted, updated_unix").
+		Update(&Branch{
+			CommitID:      commit.ID.String(),
+			CommitMessage: commit.Summary(),
+			PusherID:      pusherID,
+			CommitTime:    timeutil.TimeStamp(commit.Committer.When.Unix()),
+			IsDeleted:     false,
+		})
+}
+
+// AddDeletedBranch soft-deletes a branch: it marks the branch row as deleted and
+// records who deleted it and when. It is idempotent — deleting an already-deleted
+// branch is a no-op. Returns ErrBranchNotExist when the branch does not exist, or
+// an error when a concurrent writer deleted the row between the read and the update.
+func AddDeletedBranch(ctx context.Context, repoID int64, branchName string, deletedByID int64) error {
+	branch, err := GetBranch(ctx, repoID, branchName)
+	if err != nil {
+		return err
+	}
+	if branch.IsDeleted {
+		return nil
+	}
+
+	// The is_deleted=false guard makes the update safe against a concurrent delete.
+	cnt, err := db.GetEngine(ctx).Where("repo_id=? AND name=? AND is_deleted=?", repoID, branchName, false).
+		Cols("is_deleted, deleted_by_id, deleted_unix").
+		Update(&Branch{
+			IsDeleted:   true,
+			DeletedByID: deletedByID,
+			DeletedUnix: timeutil.TimeStampNow(),
+		})
+	if err != nil {
+		return err
+	}
+	if cnt == 0 {
+		return fmt.Errorf("branch %s not found or has been deleted", branchName)
+	}
+	// err is known to be nil here; return nil explicitly for clarity
+	// (the original "return err" was equivalent but misleading).
+	return nil
+}
+
+// RemoveDeletedBranchByID permanently removes a soft-deleted branch row.
+// The is_deleted=true guard ensures live branches can never be removed this way.
+func RemoveDeletedBranchByID(ctx context.Context, repoID, branchID int64) error {
+	_, err := db.GetEngine(ctx).Where("repo_id=? AND id=? AND is_deleted = ?", repoID, branchID, true).Delete(new(Branch))
+	return err
+}
+
+// RemoveOldDeletedBranches permanently removes soft-deleted branches whose deletion
+// is older than olderThan. Intended to run as a periodic cleanup task; errors are
+// logged rather than returned.
+func RemoveOldDeletedBranches(ctx context.Context, olderThan time.Duration) {
+	// Nothing to do for shutdown or terminate
+	log.Trace("Doing: DeletedBranchesCleanup")
+
+	deleteBefore := time.Now().Add(-olderThan)
+	_, err := db.GetEngine(ctx).Where("is_deleted=? AND deleted_unix < ?", true, deleteBefore.Unix()).Delete(new(Branch))
+	if err != nil {
+		log.Error("DeletedBranchesCleanup: %v", err)
+	}
+}
+
+// RenamedBranch provides a renamed branch log (old name -> new name).
+// It is checked when a branch can't be found, so requests for the old
+// name can be redirected to the new one.
+type RenamedBranch struct {
+	ID          int64 `xorm:"pk autoincr"`
+	RepoID      int64 `xorm:"INDEX NOT NULL"`
+	From        string // the previous branch name
+	To          string // the new branch name
+	CreatedUnix timeutil.TimeStamp `xorm:"created"`
+}
+
+// FindRenamedBranch checks if a branch was renamed, looking up the rename log
+// by repository and previous name. exist reports whether a record was found.
+func FindRenamedBranch(ctx context.Context, repoID int64, from string) (branch *RenamedBranch, exist bool, err error) {
+	branch = &RenamedBranch{
+		RepoID: repoID,
+		From:   from,
+	}
+	exist, err = db.GetEngine(ctx).Get(branch)
+
+	return branch, exist, err
+}
+
+// RenameBranch renames a branch from "from" to "to" in a single transaction,
+// keeping the database and the git repository consistent:
+// branch row, default branch, protected-branch rule, open pull requests and the
+// rename log are all updated, then gitAction performs the actual git-side rename.
+// gitAction receives isDefault=true when the renamed branch was the default branch.
+// If any step (including gitAction) fails, the whole transaction is rolled back.
+func RenameBranch(ctx context.Context, repo *repo_model.Repository, from, to string, gitAction func(ctx context.Context, isDefault bool) error) (err error) {
+	ctx, committer, err := db.TxContext(ctx)
+	if err != nil {
+		return err
+	}
+	defer committer.Close()
+
+	sess := db.GetEngine(ctx)
+
+	// check whether from branch exist
+	var branch Branch
+	exist, err := db.GetEngine(ctx).Where("repo_id=? AND name=?", repo.ID, from).Get(&branch)
+	if err != nil {
+		return err
+	} else if !exist || branch.IsDeleted {
+		return ErrBranchNotExist{
+			RepoID:     repo.ID,
+			BranchName: from,
+		}
+	}
+
+	// check whether to branch exist or is_deleted
+	var dstBranch Branch
+	exist, err = db.GetEngine(ctx).Where("repo_id=? AND name=?", repo.ID, to).Get(&dstBranch)
+	if err != nil {
+		return err
+	}
+	if exist {
+		if !dstBranch.IsDeleted {
+			return ErrBranchAlreadyExists{
+				BranchName: to,
+			}
+		}
+
+		// The target name is occupied by a soft-deleted branch: purge that row
+		// so the rename below doesn't violate the (repo_id, name) unique key.
+		if _, err := db.GetEngine(ctx).ID(dstBranch.ID).NoAutoCondition().Delete(&dstBranch); err != nil {
+			return err
+		}
+	}
+
+	// 1. update branch in database
+	if n, err := sess.Where("repo_id=? AND name=?", repo.ID, from).Update(&Branch{
+		Name: to,
+	}); err != nil {
+		return err
+	} else if n <= 0 {
+		return ErrBranchNotExist{
+			RepoID:     repo.ID,
+			BranchName: from,
+		}
+	}
+
+	// 2. update default branch if needed
+	isDefault := repo.DefaultBranch == from
+	if isDefault {
+		repo.DefaultBranch = to
+		_, err = sess.ID(repo.ID).Cols("default_branch").Update(repo)
+		if err != nil {
+			return err
+		}
+	}
+
+	// 3. Update protected branch if needed
+	protectedBranch, err := GetProtectedBranchRuleByName(ctx, repo.ID, from)
+	if err != nil {
+		return err
+	}
+
+	if protectedBranch != nil {
+		// there is a protect rule for this branch
+		protectedBranch.RuleName = to
+		_, err = sess.ID(protectedBranch.ID).Cols("branch_name").Update(protectedBranch)
+		if err != nil {
+			return err
+		}
+	} else {
+		// some glob protect rules may match this branch
+		protected, err := IsBranchProtected(ctx, repo.ID, from)
+		if err != nil {
+			return err
+		}
+		if protected {
+			return ErrBranchIsProtected
+		}
+	}
+
+	// 4. Update all not merged pull request base branch name
+	_, err = sess.Table("pull_request").Where("base_repo_id=? AND base_branch=? AND has_merged=?",
+		repo.ID, from, false).
+		Update(map[string]any{"base_branch": to})
+	if err != nil {
+		return err
+	}
+
+	// 5. insert renamed branch record
+	renamedBranch := &RenamedBranch{
+		RepoID: repo.ID,
+		From:   from,
+		To:     to,
+	}
+	err = db.Insert(ctx, renamedBranch)
+	if err != nil {
+		return err
+	}
+
+	// 6. do git action (last, so a git failure rolls back all DB changes)
+	if err = gitAction(ctx, isDefault); err != nil {
+		return err
+	}
+
+	return committer.Commit()
+}
+
+// FindRecentlyPushedNewBranches returns at most 2 new branches pushed by the user in
+// the last 6 hours which have no open PR created, excluding excludeBranchName.
+// Used to offer "create a pull request" hints after a push.
+func FindRecentlyPushedNewBranches(ctx context.Context, repoID, userID int64, excludeBranchName string) (BranchList, error) {
+	branches := make(BranchList, 0, 2)
+	// Branch names that already have an open PR in this repository.
+	subQuery := builder.Select("head_branch").From("pull_request").
+		InnerJoin("issue", "issue.id = pull_request.issue_id").
+		Where(builder.Eq{
+			"pull_request.head_repo_id": repoID,
+			"issue.is_closed":           false,
+		})
+	err := db.GetEngine(ctx).
+		Where("pusher_id=? AND is_deleted=?", userID, false).
+		And("name <> ?", excludeBranchName).
+		And("repo_id = ?", repoID).
+		And("commit_time >= ?", time.Now().Add(-time.Hour*6).Unix()).
+		NotIn("name", subQuery).
+		OrderBy("branch.commit_time DESC").
+		Limit(2).
+		Find(&branches)
+	return branches, err
+}
diff --git a/models/git/branch_list.go b/models/git/branch_list.go
new file mode 100644
index 00000000..81a43eae
--- /dev/null
+++ b/models/git/branch_list.go
@@ -0,0 +1,132 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/container"
+ "code.gitea.io/gitea/modules/optional"
+
+ "xorm.io/builder"
+)
+
+// BranchList is a list of branches with batch-loading helpers for related users.
+type BranchList []*Branch
+
+// LoadDeletedBy batch-loads the DeletedBy user for every deleted branch in the
+// list with a single query. Missing users are replaced by the Ghost user.
+func (branches BranchList) LoadDeletedBy(ctx context.Context) error {
+	// Collect the deleter IDs of deleted branches only.
+	ids := container.FilterSlice(branches, func(branch *Branch) (int64, bool) {
+		return branch.DeletedByID, branch.IsDeleted
+	})
+
+	usersMap := make(map[int64]*user_model.User, len(ids))
+	if err := db.GetEngine(ctx).In("id", ids).Find(&usersMap); err != nil {
+		return err
+	}
+	for _, branch := range branches {
+		if !branch.IsDeleted {
+			continue
+		}
+		branch.DeletedBy = usersMap[branch.DeletedByID]
+		if branch.DeletedBy == nil {
+			branch.DeletedBy = user_model.NewGhostUser()
+		}
+	}
+	return nil
+}
+
+// LoadPusher batch-loads the Pusher user for every branch in the list with a
+// single query. Missing users are replaced by the Ghost user.
+func (branches BranchList) LoadPusher(ctx context.Context) error {
+	ids := container.FilterSlice(branches, func(branch *Branch) (int64, bool) {
+		// pusher_id maybe zero because some branches are sync by backend with no pusher
+		return branch.PusherID, branch.PusherID > 0
+	})
+
+	usersMap := make(map[int64]*user_model.User, len(ids))
+	if err := db.GetEngine(ctx).In("id", ids).Find(&usersMap); err != nil {
+		return err
+	}
+	for _, branch := range branches {
+		if branch.PusherID <= 0 {
+			continue
+		}
+		branch.Pusher = usersMap[branch.PusherID]
+		if branch.Pusher == nil {
+			branch.Pusher = user_model.NewGhostUser()
+		}
+	}
+	return nil
+}
+
+// FindBranchOptions carries filter, ordering and pagination options for branch queries.
+type FindBranchOptions struct {
+	db.ListOptions
+	RepoID             int64              // filter by repository; 0 means all repositories
+	ExcludeBranchNames []string           // branch names to exclude from the result
+	IsDeletedBranch    optional.Option[bool] // None: both; Some(true/false): only (non-)deleted
+	OrderBy            string             // raw ORDER BY; empty uses the default in ToOrders
+	Keyword            string             // substring match on the branch name
+}
+
+// ToConds converts the options into a builder.Cond for use in a WHERE clause.
+func (opts FindBranchOptions) ToConds() builder.Cond {
+	cond := builder.NewCond()
+	if opts.RepoID > 0 {
+		cond = cond.And(builder.Eq{"repo_id": opts.RepoID})
+	}
+
+	if len(opts.ExcludeBranchNames) > 0 {
+		cond = cond.And(builder.NotIn("name", opts.ExcludeBranchNames))
+	}
+	if opts.IsDeletedBranch.Has() {
+		cond = cond.And(builder.Eq{"is_deleted": opts.IsDeletedBranch.Value()})
+	}
+	if opts.Keyword != "" {
+		cond = cond.And(builder.Like{"name", opts.Keyword})
+	}
+	return cond
+}
+
+// ToOrders returns the ORDER BY clause for the options, falling back to a
+// stable default ordering when OrderBy is unset.
+func (opts FindBranchOptions) ToOrders() string {
+	orderBy := opts.OrderBy
+	if orderBy == "" {
+		// the commit_time might be the same, so add the "name" to make sure the order is stable
+		orderBy = "commit_time DESC, name ASC"
+	}
+	if opts.IsDeletedBranch.ValueOrDefault(true) { // if deleted branch included, put them at the beginning
+		orderBy = "is_deleted ASC, " + orderBy
+	}
+	return orderBy
+}
+
+// FindBranchNames returns only the names of the branches matching opts,
+// applying pagination when a page size is set and the query is not "list all".
+func FindBranchNames(ctx context.Context, opts FindBranchOptions) ([]string, error) {
+	sess := db.GetEngine(ctx).Select("name").Where(opts.ToConds())
+	if opts.PageSize > 0 && !opts.IsListAll() {
+		sess = db.SetSessionPagination(sess, &opts.ListOptions)
+	}
+
+	var branches []string
+	if err := sess.Table("branch").OrderBy(opts.ToOrders()).Find(&branches); err != nil {
+		return nil, err
+	}
+	return branches, nil
+}
+
+// FindBranchesByRepoAndBranchName resolves a map of repoID -> branch name into a
+// map of repoID -> commit ID for the branches that exist. Repositories whose
+// branch is not found are simply absent from the result. Returns nil for empty input.
+func FindBranchesByRepoAndBranchName(ctx context.Context, repoBranches map[int64]string) (map[int64]string, error) {
+	if len(repoBranches) == 0 {
+		return nil, nil
+	}
+	// Build one OR-ed condition: (repo_id=a AND name=x) OR (repo_id=b AND name=y) ...
+	cond := builder.NewCond()
+	for repoID, branchName := range repoBranches {
+		cond = cond.Or(builder.And(builder.Eq{"repo_id": repoID}, builder.Eq{"name": branchName}))
+	}
+	var branches []*Branch
+	if err := db.GetEngine(ctx).
+		Where(cond).Find(&branches); err != nil {
+		return nil, err
+	}
+	branchMap := make(map[int64]string, len(branches))
+	for _, branch := range branches {
+		branchMap[branch.RepoID] = branch.CommitID
+	}
+	return branchMap, nil
+}
diff --git a/models/git/branch_test.go b/models/git/branch_test.go
new file mode 100644
index 00000000..81839eb7
--- /dev/null
+++ b/models/git/branch_test.go
@@ -0,0 +1,195 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git_test
+
+import (
+ "context"
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ git_model "code.gitea.io/gitea/models/git"
+ issues_model "code.gitea.io/gitea/models/issues"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/optional"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// TestAddDeletedBranch verifies that AddDeletedBranch is idempotent for an
+// already-deleted branch, soft-deletes a live branch, and that a subsequent
+// UpdateBranch (push) succeeds on the soft-deleted branch.
+func TestAddDeletedBranch(t *testing.T) {
+	require.NoError(t, unittest.PrepareTestDatabase())
+	repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+	assert.EqualValues(t, git.Sha1ObjectFormat.Name(), repo.ObjectFormatName)
+	firstBranch := unittest.AssertExistsAndLoadBean(t, &git_model.Branch{ID: 1})
+
+	// Fixture branch 1 is already deleted: re-deleting must be a no-op, not an error.
+	assert.True(t, firstBranch.IsDeleted)
+	require.NoError(t, git_model.AddDeletedBranch(db.DefaultContext, repo.ID, firstBranch.Name, firstBranch.DeletedByID))
+	require.NoError(t, git_model.AddDeletedBranch(db.DefaultContext, repo.ID, "branch2", int64(1)))
+
+	secondBranch := unittest.AssertExistsAndLoadBean(t, &git_model.Branch{RepoID: repo.ID, Name: "branch2"})
+	assert.True(t, secondBranch.IsDeleted)
+
+	commit := &git.Commit{
+		ID:            git.MustIDFromString(secondBranch.CommitID),
+		CommitMessage: secondBranch.CommitMessage,
+		Committer: &git.Signature{
+			When: secondBranch.CommitTime.AsLocalTime(),
+		},
+	}
+
+	_, err := git_model.UpdateBranch(db.DefaultContext, repo.ID, secondBranch.PusherID, secondBranch.Name, commit)
+	require.NoError(t, err)
+}
+
+// TestGetDeletedBranches verifies that filtering by IsDeletedBranch=true
+// returns exactly the deleted branches of the fixture repository.
+func TestGetDeletedBranches(t *testing.T) {
+	require.NoError(t, unittest.PrepareTestDatabase())
+	repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+
+	branches, err := db.Find[git_model.Branch](db.DefaultContext, git_model.FindBranchOptions{
+		ListOptions:     db.ListOptionsAll,
+		RepoID:          repo.ID,
+		IsDeletedBranch: optional.Some(true),
+	})
+	require.NoError(t, err)
+	assert.Len(t, branches, 2)
+}
+
+// TestGetDeletedBranch verifies that a deleted fixture branch can be fetched by ID.
+func TestGetDeletedBranch(t *testing.T) {
+	require.NoError(t, unittest.PrepareTestDatabase())
+	firstBranch := unittest.AssertExistsAndLoadBean(t, &git_model.Branch{ID: 1})
+
+	assert.NotNil(t, getDeletedBranch(t, firstBranch))
+}
+
+// TestDeletedBranchLoadUser verifies LoadDeletedBy: an existing deleter is
+// loaded as-is, while a missing deleter falls back to the Ghost user.
+// Fix: the error returned by LoadDeletedBy was silently ignored; it is now asserted.
+func TestDeletedBranchLoadUser(t *testing.T) {
+	require.NoError(t, unittest.PrepareTestDatabase())
+
+	firstBranch := unittest.AssertExistsAndLoadBean(t, &git_model.Branch{ID: 1})
+	secondBranch := unittest.AssertExistsAndLoadBean(t, &git_model.Branch{ID: 2})
+
+	// Deleted by an existing user: that user is loaded.
+	branch := getDeletedBranch(t, firstBranch)
+	assert.Nil(t, branch.DeletedBy)
+	require.NoError(t, branch.LoadDeletedBy(db.DefaultContext))
+	assert.NotNil(t, branch.DeletedBy)
+	assert.Equal(t, "user1", branch.DeletedBy.Name)
+
+	// Deleted by a user that no longer exists: falls back to the Ghost user.
+	branch = getDeletedBranch(t, secondBranch)
+	assert.Nil(t, branch.DeletedBy)
+	require.NoError(t, branch.LoadDeletedBy(db.DefaultContext))
+	assert.NotNil(t, branch.DeletedBy)
+	assert.Equal(t, "Ghost", branch.DeletedBy.Name)
+}
+
+// TestRemoveDeletedBranch verifies that a soft-deleted branch can be purged by ID
+// and that other branches are left untouched.
+func TestRemoveDeletedBranch(t *testing.T) {
+	require.NoError(t, unittest.PrepareTestDatabase())
+	repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+
+	firstBranch := unittest.AssertExistsAndLoadBean(t, &git_model.Branch{ID: 1})
+
+	err := git_model.RemoveDeletedBranchByID(db.DefaultContext, repo.ID, 1)
+	require.NoError(t, err)
+	unittest.AssertNotExistsBean(t, firstBranch)
+	unittest.AssertExistsAndLoadBean(t, &git_model.Branch{ID: 2})
+}
+
+// getDeletedBranch is a test helper: it fetches the deleted branch matching the
+// given fixture via GetDeletedBranchByID and asserts that the stored fields agree.
+func getDeletedBranch(t *testing.T, branch *git_model.Branch) *git_model.Branch {
+	repo := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+
+	deletedBranch, err := git_model.GetDeletedBranchByID(db.DefaultContext, repo.ID, branch.ID)
+	require.NoError(t, err)
+	assert.Equal(t, branch.ID, deletedBranch.ID)
+	assert.Equal(t, branch.Name, deletedBranch.Name)
+	assert.Equal(t, branch.CommitID, deletedBranch.CommitID)
+	assert.Equal(t, branch.DeletedByID, deletedBranch.DeletedByID)
+
+	return deletedBranch
+}
+
+// TestFindRenamedBranch verifies the rename-log lookup: a recorded rename is
+// found with its new name, and an unrecorded name reports exist=false.
+func TestFindRenamedBranch(t *testing.T) {
+	require.NoError(t, unittest.PrepareTestDatabase())
+	branch, exist, err := git_model.FindRenamedBranch(db.DefaultContext, 1, "dev")
+	require.NoError(t, err)
+	assert.True(t, exist)
+	assert.Equal(t, "master", branch.To)
+
+	_, exist, err = git_model.FindRenamedBranch(db.DefaultContext, 1, "unknow")
+	require.NoError(t, err)
+	assert.False(t, exist)
+}
+
+// TestRenameBranch renames the default branch "master" to "main" and verifies
+// every side effect of RenameBranch: the gitAction callback sees isDefault=true,
+// the repository default branch, open (but not merged) pull requests, the rename
+// log and the protected-branch rule are all updated.
+func TestRenameBranch(t *testing.T) {
+	require.NoError(t, unittest.PrepareTestDatabase())
+	repo1 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+	_isDefault := false
+
+	// Pre-create a protection rule on "master" so the rename has one to migrate.
+	ctx, committer, err := db.TxContext(db.DefaultContext)
+	defer committer.Close()
+	require.NoError(t, err)
+	require.NoError(t, git_model.UpdateProtectBranch(ctx, repo1, &git_model.ProtectedBranch{
+		RepoID:   repo1.ID,
+		RuleName: "master",
+	}, git_model.WhitelistOptions{}))
+	require.NoError(t, committer.Commit())
+
+	require.NoError(t, git_model.RenameBranch(db.DefaultContext, repo1, "master", "main", func(ctx context.Context, isDefault bool) error {
+		_isDefault = isDefault
+		return nil
+	}))
+
+	assert.True(t, _isDefault)
+	repo1 = unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+	assert.Equal(t, "main", repo1.DefaultBranch)
+
+	pull := unittest.AssertExistsAndLoadBean(t, &issues_model.PullRequest{ID: 1}) // merged
+	assert.Equal(t, "master", pull.BaseBranch)
+
+	pull = unittest.AssertExistsAndLoadBean(t, &issues_model.PullRequest{ID: 2}) // open
+	assert.Equal(t, "main", pull.BaseBranch)
+
+	renamedBranch := unittest.AssertExistsAndLoadBean(t, &git_model.RenamedBranch{ID: 2})
+	assert.Equal(t, "master", renamedBranch.From)
+	assert.Equal(t, "main", renamedBranch.To)
+	assert.Equal(t, int64(1), renamedBranch.RepoID)
+
+	unittest.AssertExistsAndLoadBean(t, &git_model.ProtectedBranch{
+		RepoID:   repo1.ID,
+		RuleName: "main",
+	})
+}
+
+// TestOnlyGetDeletedBranchOnCorrectRepo verifies that GetDeletedBranchByID
+// enforces repository ownership: a branch ID belonging to another repo errors.
+func TestOnlyGetDeletedBranchOnCorrectRepo(t *testing.T) {
+	require.NoError(t, unittest.PrepareTestDatabase())
+
+	// Get deletedBranch with ID of 1 on repo with ID 2.
+	// This should return a nil branch as this deleted branch
+	// is actually on repo with ID 1.
+	repo2 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 2})
+
+	deletedBranch, err := git_model.GetDeletedBranchByID(db.DefaultContext, repo2.ID, 1)
+
+	// Expect error, and the returned branch is nil.
+	require.Error(t, err)
+	assert.Nil(t, deletedBranch)
+
+	// Now get the deletedBranch with ID of 1 on repo with ID 1.
+	// This should return the deletedBranch.
+	repo1 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})
+
+	deletedBranch, err = git_model.GetDeletedBranchByID(db.DefaultContext, repo1.ID, 1)
+
+	// Expect no error, and the returned branch to be not nil.
+	require.NoError(t, err)
+	assert.NotNil(t, deletedBranch)
+}
+
+// TestFindBranchesByRepoAndBranchName verifies the empty-input fast path:
+// an empty map yields an empty (nil) result and no error.
+func TestFindBranchesByRepoAndBranchName(t *testing.T) {
+	require.NoError(t, unittest.PrepareTestDatabase())
+
+	// With no repos or branches given, we find no branches.
+	branches, err := git_model.FindBranchesByRepoAndBranchName(db.DefaultContext, map[int64]string{})
+	require.NoError(t, err)
+	assert.Empty(t, branches)
+}
diff --git a/models/git/commit_status.go b/models/git/commit_status.go
new file mode 100644
index 00000000..d975f057
--- /dev/null
+++ b/models/git/commit_status.go
@@ -0,0 +1,473 @@
+// Copyright 2017 Gitea. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git
+
import (
	"context"
	"crypto/sha1"
	"encoding/hex"
	"errors"
	"fmt"
	"net/url"
	"strconv"
	"strings"
	"time"

	asymkey_model "code.gitea.io/gitea/models/asymkey"
	"code.gitea.io/gitea/models/db"
	repo_model "code.gitea.io/gitea/models/repo"
	user_model "code.gitea.io/gitea/models/user"
	"code.gitea.io/gitea/modules/git"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/setting"
	api "code.gitea.io/gitea/modules/structs"
	"code.gitea.io/gitea/modules/timeutil"
	"code.gitea.io/gitea/modules/translation"

	"xorm.io/builder"
	"xorm.io/xorm"
)
+
// CommitStatus holds a single Status of a single Commit.
// Rows are unique per (repo_id, sha, index); Index is a monotonically
// increasing per-(repo, sha) counter allocated via GetNextCommitStatusIndex.
type CommitStatus struct {
	ID          int64                  `xorm:"pk autoincr"`
	Index       int64                  `xorm:"INDEX UNIQUE(repo_sha_index)"` // per repo/sha sequence number
	RepoID      int64                  `xorm:"INDEX UNIQUE(repo_sha_index)"`
	Repo        *repo_model.Repository `xorm:"-"` // lazily filled by loadAttributes
	State       api.CommitStatusState  `xorm:"VARCHAR(7) NOT NULL"`
	SHA         string                 `xorm:"VARCHAR(64) NOT NULL INDEX UNIQUE(repo_sha_index)"`
	TargetURL   string                 `xorm:"TEXT"`
	Description string                 `xorm:"TEXT"`
	ContextHash string                 `xorm:"VARCHAR(64) index"` // SHA-1 of Context (see hashCommitStatusContext)
	Context     string                 `xorm:"TEXT"`
	Creator     *user_model.User       `xorm:"-"` // lazily filled by loadAttributes
	CreatorID   int64

	CreatedUnix timeutil.TimeStamp `xorm:"INDEX created"`
	UpdatedUnix timeutil.TimeStamp `xorm:"INDEX updated"`
}
+
// init registers the commit status tables with the model registry so the
// schema sync/migration machinery knows about them.
func init() {
	db.RegisterModel(new(CommitStatus))
	db.RegisterModel(new(CommitStatusIndex))
}
+
// postgresGetCommitStatusIndex atomically increments and returns the next
// per-(repo, sha) status index using PostgreSQL's single-statement
// "ON CONFLICT ... DO UPDATE ... RETURNING" upsert.
func postgresGetCommitStatusIndex(ctx context.Context, repoID int64, sha string) (int64, error) {
	res, err := db.GetEngine(ctx).Query("INSERT INTO `commit_status_index` (repo_id, sha, max_index) "+
		"VALUES (?,?,1) ON CONFLICT (repo_id, sha) DO UPDATE SET max_index = `commit_status_index`.max_index+1 RETURNING max_index",
		repoID, sha)
	if err != nil {
		return 0, err
	}
	if len(res) == 0 {
		return 0, db.ErrGetResourceIndexFailed
	}
	// Query returns raw column bytes; parse the single max_index value.
	return strconv.ParseInt(string(res[0]["max_index"]), 10, 64)
}
+
// mysqlGetCommitStatusIndex increments the per-(repo, sha) counter with
// MySQL's "ON DUPLICATE KEY UPDATE" upsert, then reads the new value back
// with a second query (MySQL has no RETURNING clause for this form).
func mysqlGetCommitStatusIndex(ctx context.Context, repoID int64, sha string) (int64, error) {
	if _, err := db.GetEngine(ctx).Exec("INSERT INTO `commit_status_index` (repo_id, sha, max_index) "+
		"VALUES (?,?,1) ON DUPLICATE KEY UPDATE max_index = max_index+1",
		repoID, sha); err != nil {
		return 0, err
	}

	var idx int64
	_, err := db.GetEngine(ctx).SQL("SELECT max_index FROM `commit_status_index` WHERE repo_id = ? AND sha = ?",
		repoID, sha).Get(&idx)
	if err != nil {
		return 0, err
	}
	if idx == 0 {
		// The row must exist after the upsert above; 0 means the read-back failed.
		return 0, errors.New("cannot get the correct index")
	}
	return idx, nil
}
+
// GetNextCommitStatusIndex generates the next unique per-(repo, sha) status
// index. PostgreSQL and MySQL use dialect-specific single-statement upserts;
// other databases fall through to the update-then-insert sequence below.
// NOTE(review): the original comment claimed this "retried 3 times", but no
// retry loop is visible here — callers should not rely on retries.
func GetNextCommitStatusIndex(ctx context.Context, repoID int64, sha string) (int64, error) {
	// Validate the SHA before touching the database.
	_, err := git.NewIDFromString(sha)
	if err != nil {
		return 0, git.ErrInvalidSHA{SHA: sha}
	}

	switch {
	case setting.Database.Type.IsPostgreSQL():
		return postgresGetCommitStatusIndex(ctx, repoID, sha)
	case setting.Database.Type.IsMySQL():
		return mysqlGetCommitStatusIndex(ctx, repoID, sha)
	}

	e := db.GetEngine(ctx)

	// try to update the max_index to next value, and acquire the write-lock for the record
	res, err := e.Exec("UPDATE `commit_status_index` SET max_index=max_index+1 WHERE repo_id=? AND sha=?", repoID, sha)
	if err != nil {
		return 0, fmt.Errorf("update failed: %w", err)
	}
	affected, err := res.RowsAffected()
	if err != nil {
		return 0, err
	}
	if affected == 0 {
		// this slow path is only for the first time of creating a resource index
		// The insert error is deliberately deferred: a concurrent inserter may
		// win the race, in which case the retried update below still succeeds.
		_, errIns := e.Exec("INSERT INTO `commit_status_index` (repo_id, sha, max_index) VALUES (?, ?, 0)", repoID, sha)
		res, err = e.Exec("UPDATE `commit_status_index` SET max_index=max_index+1 WHERE repo_id=? AND sha=?", repoID, sha)
		if err != nil {
			return 0, fmt.Errorf("update2 failed: %w", err)
		}
		affected, err = res.RowsAffected()
		if err != nil {
			return 0, fmt.Errorf("RowsAffected failed: %w", err)
		}
		// if the update still can not update any records, the record must not exist and there must be some errors (insert error)
		if affected == 0 {
			if errIns == nil {
				return 0, errors.New("impossible error when GetNextCommitStatusIndex, insert and update both succeeded but no record is updated")
			}
			return 0, fmt.Errorf("insert failed: %w", errIns)
		}
	}

	// now, the new index is in database (protected by the transaction and write-lock)
	var newIdx int64
	has, err := e.SQL("SELECT max_index FROM `commit_status_index` WHERE repo_id=? AND sha=?", repoID, sha).Get(&newIdx)
	if err != nil {
		return 0, fmt.Errorf("select failed: %w", err)
	}
	if !has {
		return 0, errors.New("impossible error when GetNextCommitStatusIndex, upsert succeeded but no record can be selected")
	}
	return newIdx, nil
}
+
+func (status *CommitStatus) loadAttributes(ctx context.Context) (err error) {
+ if status.Repo == nil {
+ status.Repo, err = repo_model.GetRepositoryByID(ctx, status.RepoID)
+ if err != nil {
+ return fmt.Errorf("getRepositoryByID [%d]: %w", status.RepoID, err)
+ }
+ }
+ if status.Creator == nil && status.CreatorID > 0 {
+ status.Creator, err = user_model.GetUserByID(ctx, status.CreatorID)
+ if err != nil {
+ return fmt.Errorf("getUserByID [%d]: %w", status.CreatorID, err)
+ }
+ }
+ return nil
+}
+
// APIURL returns the absolute APIURL to this commit-status.
// The loadAttributes error is deliberately ignored; if the repository could
// not be loaded and status.Repo is still nil, the call below may panic —
// callers are expected to use statuses whose repository exists.
func (status *CommitStatus) APIURL(ctx context.Context) string {
	_ = status.loadAttributes(ctx)
	return status.Repo.APIURL() + "/statuses/" + url.PathEscape(status.SHA)
}
+
// LocaleString returns the locale string name of the Status,
// looked up under the translation key "repo.commitstatus.<state>".
func (status *CommitStatus) LocaleString(lang translation.Locale) string {
	return lang.TrString("repo.commitstatus." + status.State.String())
}
+
+// CalcCommitStatus returns commit status state via some status, the commit statues should order by id desc
+func CalcCommitStatus(statuses []*CommitStatus) *CommitStatus {
+ if len(statuses) == 0 {
+ return nil
+ }
+
+ latestWorstStatus := statuses[0]
+ for _, status := range statuses[1:] {
+ if status.State.NoBetterThan(latestWorstStatus.State) {
+ latestWorstStatus = status
+ }
+ }
+ return latestWorstStatus
+}
+
// CommitStatusOptions holds the options for query commit statuses.
type CommitStatusOptions struct {
	db.ListOptions
	RepoID   int64  // repository whose statuses are queried
	SHA      string // commit SHA the statuses belong to
	State    string // optional filter; only the values listed in ToConds apply
	SortType string // ordering keyword, see ToOrders for accepted values
}
+
+func (opts *CommitStatusOptions) ToConds() builder.Cond {
+ var cond builder.Cond = builder.Eq{
+ "repo_id": opts.RepoID,
+ "sha": opts.SHA,
+ }
+
+ switch opts.State {
+ case "pending", "success", "error", "failure", "warning":
+ cond = cond.And(builder.Eq{
+ "state": opts.State,
+ })
+ }
+
+ return cond
+}
+
// ToOrders maps SortType to an ORDER BY clause; unrecognized values fall
// back to newest-first by creation time.
// NOTE(review): "leastindex" maps to `index` DESC and "highestindex" to
// `index` ASC, which reads inverted relative to the names — confirm against
// the UI before changing, since existing callers may depend on it.
func (opts *CommitStatusOptions) ToOrders() string {
	switch opts.SortType {
	case "oldest":
		return "created_unix ASC"
	case "recentupdate":
		return "updated_unix DESC"
	case "leastupdate":
		return "updated_unix ASC"
	case "leastindex":
		return "`index` DESC"
	case "highestindex":
		return "`index` ASC"
	default:
		return "created_unix DESC"
	}
}
+
// CommitStatusIndex represents a table that tracks the highest allocated
// commit status index (max_index) per unique (repo_id, sha) pair.
type CommitStatusIndex struct {
	ID       int64
	RepoID   int64  `xorm:"unique(repo_sha)"`
	SHA      string `xorm:"unique(repo_sha)"`
	MaxIndex int64  `xorm:"index"`
}
+
// GetLatestCommitStatus returns all statuses with a unique context for a given commit.
// For each context only the row with the highest index (i.e. the most recent
// status) is returned, ordered latest-first; pagination applies to the set of
// contexts. The returned count is the number of distinct contexts.
func GetLatestCommitStatus(ctx context.Context, repoID int64, sha string, listOptions db.ListOptions) ([]*CommitStatus, int64, error) {
	// getBase builds a fresh session each time; xorm sessions are consumed by use.
	getBase := func() *xorm.Session {
		return db.GetEngine(ctx).Table(&CommitStatus{}).
			Where("repo_id = ?", repoID).And("sha = ?", sha)
	}
	// Step 1: collect the latest index per context_hash.
	indices := make([]int64, 0, 10)
	sess := getBase().Select("max( `index` ) as `index`").
		GroupBy("context_hash").OrderBy("max( `index` ) desc")
	if !listOptions.IsListAll() {
		sess = db.SetSessionPagination(sess, &listOptions)
	}
	count, err := sess.FindAndCount(&indices)
	if err != nil {
		return nil, count, err
	}
	statuses := make([]*CommitStatus, 0, len(indices))
	if len(indices) == 0 {
		return statuses, count, nil
	}
	// Step 2: load the full rows for those indices.
	return statuses, count, getBase().And(builder.In("`index`", indices)).Find(&statuses)
}
+
// GetLatestCommitStatusForPairs returns all statuses with a unique context for a given list of repo-sha pairs.
// The result maps repository ID to the latest status per context of that
// repository's requested SHA(s).
func GetLatestCommitStatusForPairs(ctx context.Context, repoSHAs []RepoSHA) (map[int64][]*CommitStatus, error) {
	// Intermediate row shape for the "latest index per context" aggregation.
	type result struct {
		Index  int64
		RepoID int64
		SHA    string
	}

	results := make([]result, 0, len(repoSHAs))

	// Fresh session per query; xorm sessions are consumed by use.
	getBase := func() *xorm.Session {
		return db.GetEngine(ctx).Table(&CommitStatus{})
	}

	// Create a disjunction of conditions for each repoID and SHA pair
	conds := make([]builder.Cond, 0, len(repoSHAs))
	for _, repoSHA := range repoSHAs {
		conds = append(conds, builder.Eq{"repo_id": repoSHA.RepoID, "sha": repoSHA.SHA})
	}
	sess := getBase().Where(builder.Or(conds...)).
		Select("max( `index` ) as `index`, repo_id, sha").
		GroupBy("context_hash, repo_id, sha").OrderBy("max( `index` ) desc")

	err := sess.Find(&results)
	if err != nil {
		return nil, err
	}

	repoStatuses := make(map[int64][]*CommitStatus)

	if len(results) > 0 {
		statuses := make([]*CommitStatus, 0, len(results))

		// Second query: fetch the full rows matching the aggregated keys.
		conds = make([]builder.Cond, 0, len(results))
		for _, result := range results {
			cond := builder.Eq{
				"`index`": result.Index,
				"repo_id": result.RepoID,
				"sha":     result.SHA,
			}
			conds = append(conds, cond)
		}
		err = getBase().Where(builder.Or(conds...)).Find(&statuses)
		if err != nil {
			return nil, err
		}

		// Group the statuses by repo ID
		for _, status := range statuses {
			repoStatuses[status.RepoID] = append(repoStatuses[status.RepoID], status)
		}
	}

	return repoStatuses, nil
}
+
// GetLatestCommitStatusForRepoCommitIDs returns all statuses with a unique
// context for the given commit IDs of a single repository, keyed by SHA.
func GetLatestCommitStatusForRepoCommitIDs(ctx context.Context, repoID int64, commitIDs []string) (map[string][]*CommitStatus, error) {
	// Intermediate row shape for the "latest index per context" aggregation.
	type result struct {
		Index int64
		SHA   string
	}

	// Fresh session per query, pre-filtered to the repository.
	getBase := func() *xorm.Session {
		return db.GetEngine(ctx).Table(&CommitStatus{}).Where("repo_id = ?", repoID)
	}
	results := make([]result, 0, len(commitIDs))

	conds := make([]builder.Cond, 0, len(commitIDs))
	for _, sha := range commitIDs {
		conds = append(conds, builder.Eq{"sha": sha})
	}
	sess := getBase().And(builder.Or(conds...)).
		Select("max( `index` ) as `index`, sha").
		GroupBy("context_hash, sha").OrderBy("max( `index` ) desc")

	err := sess.Find(&results)
	if err != nil {
		return nil, err
	}

	repoStatuses := make(map[string][]*CommitStatus)

	if len(results) > 0 {
		statuses := make([]*CommitStatus, 0, len(results))

		// Second query: fetch the full rows matching the aggregated keys.
		conds = make([]builder.Cond, 0, len(results))
		for _, result := range results {
			conds = append(conds, builder.Eq{"`index`": result.Index, "sha": result.SHA})
		}
		err = getBase().And(builder.Or(conds...)).Find(&statuses)
		if err != nil {
			return nil, err
		}

		// Group the statuses by commit
		for _, status := range statuses {
			repoStatuses[status.SHA] = append(repoStatuses[status.SHA], status)
		}
	}

	return repoStatuses, nil
}
+
+// FindRepoRecentCommitStatusContexts returns repository's recent commit status contexts
+func FindRepoRecentCommitStatusContexts(ctx context.Context, repoID int64, before time.Duration) ([]string, error) {
+ start := timeutil.TimeStampNow().AddDuration(-before)
+
+ var contexts []string
+ if err := db.GetEngine(ctx).Table("commit_status").
+ Where("repo_id = ?", repoID).And("updated_unix >= ?", start).
+ Cols("context").Distinct().Find(&contexts); err != nil {
+ return nil, err
+ }
+
+ return contexts, nil
+}
+
// NewCommitStatusOptions holds options for creating a CommitStatus.
type NewCommitStatusOptions struct {
	Repo         *repo_model.Repository // required: target repository
	Creator      *user_model.User       // required: user posting the status
	SHA          git.ObjectID           // commit the status applies to
	CommitStatus *CommitStatus          // payload; Index/SHA/IDs are filled in by NewCommitStatus
}
+
// NewCommitStatus saves a commit status into the database.
// It validates the options, allocates the next per-(repo, sha) index inside a
// transaction, normalizes the user-provided fields, and inserts the row.
func NewCommitStatus(ctx context.Context, opts NewCommitStatusOptions) error {
	if opts.Repo == nil {
		return fmt.Errorf("NewCommitStatus[nil, %s]: no repository specified", opts.SHA)
	}

	repoPath := opts.Repo.RepoPath()
	if opts.Creator == nil {
		return fmt.Errorf("NewCommitStatus[%s, %s]: no user specified", repoPath, opts.SHA)
	}

	// Index allocation and row insert must share one transaction so the
	// (repo, sha, index) uniqueness constraint cannot be violated.
	ctx, committer, err := db.TxContext(ctx)
	if err != nil {
		return fmt.Errorf("NewCommitStatus[repo_id: %d, user_id: %d, sha: %s]: %w", opts.Repo.ID, opts.Creator.ID, opts.SHA, err)
	}
	defer committer.Close()

	// Get the next Status Index
	idx, err := GetNextCommitStatusIndex(ctx, opts.Repo.ID, opts.SHA.String())
	if err != nil {
		return fmt.Errorf("generate commit status index failed: %w", err)
	}

	// Normalize user input and fill in the server-controlled fields.
	opts.CommitStatus.Description = strings.TrimSpace(opts.CommitStatus.Description)
	opts.CommitStatus.Context = strings.TrimSpace(opts.CommitStatus.Context)
	opts.CommitStatus.TargetURL = strings.TrimSpace(opts.CommitStatus.TargetURL)
	opts.CommitStatus.SHA = opts.SHA.String()
	opts.CommitStatus.CreatorID = opts.Creator.ID
	opts.CommitStatus.RepoID = opts.Repo.ID
	opts.CommitStatus.Index = idx
	log.Debug("NewCommitStatus[%s, %s]: %d", repoPath, opts.SHA, opts.CommitStatus.Index)

	opts.CommitStatus.ContextHash = hashCommitStatusContext(opts.CommitStatus.Context)

	// Insert new CommitStatus
	if _, err = db.GetEngine(ctx).Insert(opts.CommitStatus); err != nil {
		return fmt.Errorf("insert CommitStatus[%s, %s]: %w", repoPath, opts.SHA, err)
	}

	return committer.Commit()
}
+
// SignCommitWithStatuses represents a commit with validation of signature and status state.
type SignCommitWithStatuses struct {
	Status   *CommitStatus   // combined worst state, see CalcCommitStatus
	Statuses []*CommitStatus // latest status per context for this commit
	*asymkey_model.SignCommit
}
+
+// ParseCommitsWithStatus checks commits latest statuses and calculates its worst status state
+func ParseCommitsWithStatus(ctx context.Context, oldCommits []*asymkey_model.SignCommit, repo *repo_model.Repository) []*SignCommitWithStatuses {
+ newCommits := make([]*SignCommitWithStatuses, 0, len(oldCommits))
+
+ for _, c := range oldCommits {
+ commit := &SignCommitWithStatuses{
+ SignCommit: c,
+ }
+ statuses, _, err := GetLatestCommitStatus(ctx, repo.ID, commit.ID.String(), db.ListOptions{})
+ if err != nil {
+ log.Error("GetLatestCommitStatus: %v", err)
+ } else {
+ commit.Statuses = statuses
+ commit.Status = CalcCommitStatus(statuses)
+ }
+
+ newCommits = append(newCommits, commit)
+ }
+ return newCommits
+}
+
// hashCommitStatusContext returns the lowercase hex-encoded SHA-1 digest of
// the context string; it is stored in CommitStatus.ContextHash so contexts
// can be grouped and indexed by a fixed-length key.
// hex.EncodeToString replaces the previous fmt.Sprintf("%x", ...) — it
// produces identical output without fmt's reflection/boxing overhead on this
// frequently-called helper.
func hashCommitStatusContext(context string) string {
	sum := sha1.Sum([]byte(context))
	return hex.EncodeToString(sum[:])
}
+
+// ConvertFromGitCommit converts git commits into SignCommitWithStatuses
+func ConvertFromGitCommit(ctx context.Context, commits []*git.Commit, repo *repo_model.Repository) []*SignCommitWithStatuses {
+ return ParseCommitsWithStatus(ctx,
+ asymkey_model.ParseCommitsWithSignature(
+ ctx,
+ user_model.ValidateCommitsWithEmails(ctx, commits),
+ repo.GetTrustModel(),
+ func(user *user_model.User) (bool, error) {
+ return repo_model.IsOwnerMemberCollaborator(ctx, repo, user.ID)
+ },
+ ),
+ repo,
+ )
+}
diff --git a/models/git/commit_status_summary.go b/models/git/commit_status_summary.go
new file mode 100644
index 00000000..7603e7aa
--- /dev/null
+++ b/models/git/commit_status_summary.go
@@ -0,0 +1,88 @@
+// Copyright 2024 Gitea. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git
+
+import (
+ "context"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/setting"
+ api "code.gitea.io/gitea/modules/structs"
+
+ "xorm.io/builder"
+)
+
// CommitStatusSummary holds the latest combined commit status of a single
// commit, denormalized for cheap lookups (one row per repo/SHA pair).
type CommitStatusSummary struct {
	ID        int64                 `xorm:"pk autoincr"`
	RepoID    int64                 `xorm:"INDEX UNIQUE(repo_id_sha)"`
	SHA       string                `xorm:"VARCHAR(64) NOT NULL INDEX UNIQUE(repo_id_sha)"`
	State     api.CommitStatusState `xorm:"VARCHAR(7) NOT NULL"`
	TargetURL string                `xorm:"TEXT"`
}
+
// init registers the summary table with the model registry.
func init() {
	db.RegisterModel(new(CommitStatusSummary))
}
+
// RepoSHA identifies a single commit of a single repository.
type RepoSHA struct {
	RepoID int64
	SHA    string
}
+
+func GetLatestCommitStatusForRepoAndSHAs(ctx context.Context, repoSHAs []RepoSHA) ([]*CommitStatus, error) {
+ cond := builder.NewCond()
+ for _, rs := range repoSHAs {
+ cond = cond.Or(builder.Eq{"repo_id": rs.RepoID, "sha": rs.SHA})
+ }
+
+ var summaries []CommitStatusSummary
+ if err := db.GetEngine(ctx).Where(cond).Find(&summaries); err != nil {
+ return nil, err
+ }
+
+ commitStatuses := make([]*CommitStatus, 0, len(repoSHAs))
+ for _, summary := range summaries {
+ commitStatuses = append(commitStatuses, &CommitStatus{
+ RepoID: summary.RepoID,
+ SHA: summary.SHA,
+ State: summary.State,
+ TargetURL: summary.TargetURL,
+ })
+ }
+ return commitStatuses, nil
+}
+
+func UpdateCommitStatusSummary(ctx context.Context, repoID int64, sha string) error {
+ commitStatuses, _, err := GetLatestCommitStatus(ctx, repoID, sha, db.ListOptionsAll)
+ if err != nil {
+ return err
+ }
+ state := CalcCommitStatus(commitStatuses)
+ // mysql will return 0 when update a record which state hasn't been changed which behaviour is different from other database,
+ // so we need to use insert in on duplicate
+ if setting.Database.Type.IsMySQL() {
+ _, err := db.GetEngine(ctx).Exec("INSERT INTO commit_status_summary (repo_id,sha,state,target_url) VALUES (?,?,?,?) ON DUPLICATE KEY UPDATE state=?",
+ repoID, sha, state.State, state.TargetURL, state.State)
+ return err
+ }
+
+ if cnt, err := db.GetEngine(ctx).Where("repo_id=? AND sha=?", repoID, sha).
+ Cols("state, target_url").
+ Update(&CommitStatusSummary{
+ State: state.State,
+ TargetURL: state.TargetURL,
+ }); err != nil {
+ return err
+ } else if cnt == 0 {
+ _, err = db.GetEngine(ctx).Insert(&CommitStatusSummary{
+ RepoID: repoID,
+ SHA: sha,
+ State: state.State,
+ TargetURL: state.TargetURL,
+ })
+ return err
+ }
+ return nil
+}
diff --git a/models/git/commit_status_test.go b/models/git/commit_status_test.go
new file mode 100644
index 00000000..07b9031c
--- /dev/null
+++ b/models/git/commit_status_test.go
@@ -0,0 +1,242 @@
+// Copyright 2017 Gitea. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git_test
+
+import (
+ "testing"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ git_model "code.gitea.io/gitea/models/git"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unittest"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/git"
+ "code.gitea.io/gitea/modules/gitrepo"
+ "code.gitea.io/gitea/modules/structs"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
// TestGetCommitStatuses verifies paginated listing of commit statuses for a
// fixture commit: page 1 returns all five fixture rows in default
// (created_unix DESC) order, page 2 is empty but keeps the same total count.
func TestGetCommitStatuses(t *testing.T) {
	require.NoError(t, unittest.PrepareTestDatabase())

	repo1 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1})

	sha1 := "1234123412341234123412341234123412341234"

	statuses, maxResults, err := db.FindAndCount[git_model.CommitStatus](db.DefaultContext, &git_model.CommitStatusOptions{
		ListOptions: db.ListOptions{Page: 1, PageSize: 50},
		RepoID:      repo1.ID,
		SHA:         sha1,
	})
	require.NoError(t, err)
	assert.Equal(t, 5, int(maxResults))
	assert.Len(t, statuses, 5)

	assert.Equal(t, "ci/awesomeness", statuses[0].Context)
	assert.Equal(t, structs.CommitStatusPending, statuses[0].State)
	assert.Equal(t, "https://try.gitea.io/api/v1/repos/user2/repo1/statuses/1234123412341234123412341234123412341234", statuses[0].APIURL(db.DefaultContext))

	assert.Equal(t, "cov/awesomeness", statuses[1].Context)
	assert.Equal(t, structs.CommitStatusWarning, statuses[1].State)
	assert.Equal(t, "https://try.gitea.io/api/v1/repos/user2/repo1/statuses/1234123412341234123412341234123412341234", statuses[1].APIURL(db.DefaultContext))

	assert.Equal(t, "cov/awesomeness", statuses[2].Context)
	assert.Equal(t, structs.CommitStatusSuccess, statuses[2].State)
	assert.Equal(t, "https://try.gitea.io/api/v1/repos/user2/repo1/statuses/1234123412341234123412341234123412341234", statuses[2].APIURL(db.DefaultContext))

	assert.Equal(t, "ci/awesomeness", statuses[3].Context)
	assert.Equal(t, structs.CommitStatusFailure, statuses[3].State)
	assert.Equal(t, "https://try.gitea.io/api/v1/repos/user2/repo1/statuses/1234123412341234123412341234123412341234", statuses[3].APIURL(db.DefaultContext))

	assert.Equal(t, "deploy/awesomeness", statuses[4].Context)
	assert.Equal(t, structs.CommitStatusError, statuses[4].State)
	assert.Equal(t, "https://try.gitea.io/api/v1/repos/user2/repo1/statuses/1234123412341234123412341234123412341234", statuses[4].APIURL(db.DefaultContext))

	// Page 2 is past the end: no rows, but the total count is unchanged.
	statuses, maxResults, err = db.FindAndCount[git_model.CommitStatus](db.DefaultContext, &git_model.CommitStatusOptions{
		ListOptions: db.ListOptions{Page: 2, PageSize: 50},
		RepoID:      repo1.ID,
		SHA:         sha1,
	})
	require.NoError(t, err)
	assert.Equal(t, 5, int(maxResults))
	assert.Empty(t, statuses)
}
+
// Test_CalcCommitStatus is a table test for CalcCommitStatus: the worst state
// must win, ties keep the latest entry, and an empty list yields nil.
func Test_CalcCommitStatus(t *testing.T) {
	kases := []struct {
		statuses []*git_model.CommitStatus
		expected *git_model.CommitStatus
	}{
		// Single status: returned as-is.
		{
			statuses: []*git_model.CommitStatus{
				{
					State: structs.CommitStatusPending,
				},
			},
			expected: &git_model.CommitStatus{
				State: structs.CommitStatusPending,
			},
		},
		// Pending is worse than success.
		{
			statuses: []*git_model.CommitStatus{
				{
					State: structs.CommitStatusSuccess,
				},
				{
					State: structs.CommitStatusPending,
				},
			},
			expected: &git_model.CommitStatus{
				State: structs.CommitStatusPending,
			},
		},
		{
			statuses: []*git_model.CommitStatus{
				{
					State: structs.CommitStatusSuccess,
				},
				{
					State: structs.CommitStatusPending,
				},
				{
					State: structs.CommitStatusSuccess,
				},
			},
			expected: &git_model.CommitStatus{
				State: structs.CommitStatusPending,
			},
		},
		// Error beats pending and success.
		{
			statuses: []*git_model.CommitStatus{
				{
					State: structs.CommitStatusError,
				},
				{
					State: structs.CommitStatusPending,
				},
				{
					State: structs.CommitStatusSuccess,
				},
			},
			expected: &git_model.CommitStatus{
				State: structs.CommitStatusError,
			},
		},
		{
			statuses: []*git_model.CommitStatus{
				{
					State: structs.CommitStatusWarning,
				},
				{
					State: structs.CommitStatusPending,
				},
				{
					State: structs.CommitStatusSuccess,
				},
			},
			expected: &git_model.CommitStatus{
				State: structs.CommitStatusWarning,
			},
		},
		// Equal states: the latest entry (highest position, ID 3) wins.
		{
			statuses: []*git_model.CommitStatus{
				{
					State: structs.CommitStatusSuccess,
					ID:    1,
				},
				{
					State: structs.CommitStatusSuccess,
					ID:    2,
				},
				{
					State: structs.CommitStatusSuccess,
					ID:    3,
				},
			},
			expected: &git_model.CommitStatus{
				State: structs.CommitStatusSuccess,
				ID:    3,
			},
		},
		{
			statuses: []*git_model.CommitStatus{
				{
					State: structs.CommitStatusFailure,
				},
				{
					State: structs.CommitStatusError,
				},
				{
					State: structs.CommitStatusWarning,
				},
			},
			expected: &git_model.CommitStatus{
				State: structs.CommitStatusError,
			},
		},
		// Empty input: nil result.
		{
			statuses: []*git_model.CommitStatus{},
			expected: nil,
		},
	}

	for _, kase := range kases {
		assert.Equal(t, kase.expected, git_model.CalcCommitStatus(kase.statuses))
	}
}
+
// TestFindRepoRecentCommitStatusContexts creates two statuses sharing one
// context on a real fixture commit and verifies that the recent-context query
// deduplicates them to a single context string.
func TestFindRepoRecentCommitStatusContexts(t *testing.T) {
	require.NoError(t, unittest.PrepareTestDatabase())

	repo2 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 2})
	user2 := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 2})
	gitRepo, err := gitrepo.OpenRepository(git.DefaultContext, repo2)
	require.NoError(t, err)
	defer gitRepo.Close()

	commit, err := gitRepo.GetBranchCommit(repo2.DefaultBranch)
	require.NoError(t, err)

	// Clean up the statuses created below so other tests see pristine fixtures.
	defer func() {
		_, err := db.DeleteByBean(db.DefaultContext, &git_model.CommitStatus{
			RepoID:    repo2.ID,
			CreatorID: user2.ID,
			SHA:       commit.ID.String(),
		})
		require.NoError(t, err)
	}()

	err = git_model.NewCommitStatus(db.DefaultContext, git_model.NewCommitStatusOptions{
		Repo:    repo2,
		Creator: user2,
		SHA:     commit.ID,
		CommitStatus: &git_model.CommitStatus{
			State:     structs.CommitStatusFailure,
			TargetURL: "https://example.com/tests/",
			Context:   "compliance/lint-backend",
		},
	})
	require.NoError(t, err)

	// Second status with the same context — must not produce a duplicate entry.
	err = git_model.NewCommitStatus(db.DefaultContext, git_model.NewCommitStatusOptions{
		Repo:    repo2,
		Creator: user2,
		SHA:     commit.ID,
		CommitStatus: &git_model.CommitStatus{
			State:     structs.CommitStatusSuccess,
			TargetURL: "https://example.com/tests/",
			Context:   "compliance/lint-backend",
		},
	})
	require.NoError(t, err)

	contexts, err := git_model.FindRepoRecentCommitStatusContexts(db.DefaultContext, repo2.ID, time.Hour)
	require.NoError(t, err)
	if assert.Len(t, contexts, 1) {
		assert.Equal(t, "compliance/lint-backend", contexts[0])
	}
}
diff --git a/models/git/lfs.go b/models/git/lfs.go
new file mode 100644
index 00000000..44b741c4
--- /dev/null
+++ b/models/git/lfs.go
@@ -0,0 +1,419 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/perm"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/lfs"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "xorm.io/builder"
+)
+
// ErrLFSLockNotExist represents a "LFSLockNotExist" kind of error.
type ErrLFSLockNotExist struct {
	ID     int64  // lock id that was looked up
	RepoID int64  // repository the lock was searched in
	Path   string // file path of the lock
}

// IsErrLFSLockNotExist checks if an error is a ErrLFSLockNotExist.
func IsErrLFSLockNotExist(err error) bool {
	_, ok := err.(ErrLFSLockNotExist)
	return ok
}

func (err ErrLFSLockNotExist) Error() string {
	return fmt.Sprintf("lfs lock does not exist [id: %d, rid: %d, path: %s]", err.ID, err.RepoID, err.Path)
}

// Unwrap maps this error to util.ErrNotExist so errors.Is matching works.
func (err ErrLFSLockNotExist) Unwrap() error {
	return util.ErrNotExist
}
+
// ErrLFSUnauthorizedAction represents a "LFSUnauthorizedAction" kind of error.
type ErrLFSUnauthorizedAction struct {
	RepoID   int64
	UserName string
	Mode     perm.AccessMode // the access level that was required but missing
}

// IsErrLFSUnauthorizedAction checks if an error is a ErrLFSUnauthorizedAction.
func IsErrLFSUnauthorizedAction(err error) bool {
	_, ok := err.(ErrLFSUnauthorizedAction)
	return ok
}

func (err ErrLFSUnauthorizedAction) Error() string {
	// Only write access gets its own wording; every other mode reads as "read".
	if err.Mode == perm.AccessModeWrite {
		return fmt.Sprintf("User %s doesn't have write access for lfs lock [rid: %d]", err.UserName, err.RepoID)
	}
	return fmt.Sprintf("User %s doesn't have read access for lfs lock [rid: %d]", err.UserName, err.RepoID)
}

// Unwrap maps this error to util.ErrPermissionDenied so errors.Is matching works.
func (err ErrLFSUnauthorizedAction) Unwrap() error {
	return util.ErrPermissionDenied
}
+
// ErrLFSLockAlreadyExist represents a "LFSLockAlreadyExist" kind of error.
type ErrLFSLockAlreadyExist struct {
	RepoID int64
	Path   string
}

// IsErrLFSLockAlreadyExist checks if an error is a ErrLFSLockAlreadyExist.
func IsErrLFSLockAlreadyExist(err error) bool {
	_, ok := err.(ErrLFSLockAlreadyExist)
	return ok
}

func (err ErrLFSLockAlreadyExist) Error() string {
	return fmt.Sprintf("lfs lock already exists [rid: %d, path: %s]", err.RepoID, err.Path)
}

// Unwrap maps this error to util.ErrAlreadyExist so errors.Is matching works.
func (err ErrLFSLockAlreadyExist) Unwrap() error {
	return util.ErrAlreadyExist
}
+
// ErrLFSFileLocked represents a "LFSFileLocked" kind of error.
type ErrLFSFileLocked struct {
	RepoID   int64
	Path     string
	UserName string // holder of the existing lock
}

// IsErrLFSFileLocked checks if an error is a ErrLFSFileLocked.
func IsErrLFSFileLocked(err error) bool {
	_, ok := err.(ErrLFSFileLocked)
	return ok
}

func (err ErrLFSFileLocked) Error() string {
	return fmt.Sprintf("File is lfs locked [repo: %d, locked by: %s, path: %s]", err.RepoID, err.UserName, err.Path)
}

// Unwrap maps this error to util.ErrPermissionDenied so errors.Is matching works.
func (err ErrLFSFileLocked) Unwrap() error {
	return util.ErrPermissionDenied
}
+
// LFSMetaObject stores metadata for LFS tracked files.
// The embedded lfs.Pointer contributes the Oid and Size columns; each
// (oid, repository_id) pair is unique.
type LFSMetaObject struct {
	ID           int64 `xorm:"pk autoincr"`
	lfs.Pointer  `xorm:"extends"`
	RepositoryID int64              `xorm:"UNIQUE(s) INDEX NOT NULL"`
	Existing     bool               `xorm:"-"` // true when NewLFSMetaObject found a pre-existing row
	CreatedUnix  timeutil.TimeStamp `xorm:"created"`
	UpdatedUnix  timeutil.TimeStamp `xorm:"INDEX updated"`
}
+
// init registers the LFS meta object table with the model registry.
func init() {
	db.RegisterModel(new(LFSMetaObject))
}
+
// LFSTokenResponse defines the JSON structure in which the JWT token is stored.
// This structure is fetched via SSH and passed by the Git LFS client to the server
// endpoint for authorization.
type LFSTokenResponse struct {
	Header map[string]string `json:"header"` // HTTP headers (incl. the auth token) the client must send
	Href   string            `json:"href"`   // LFS server endpoint URL
}
+
// ErrLFSObjectNotExist is returned from lfs models functions in order
// to differentiate between database errors and a missing object.
var ErrLFSObjectNotExist = db.ErrNotExist{Resource: "LFS Meta object"}
+
// NewLFSMetaObject stores a given populated LFSMetaObject structure in the database
// if it is not already present. The check and insert share one transaction;
// when a row already exists it is returned with Existing set to true.
func NewLFSMetaObject(ctx context.Context, repoID int64, p lfs.Pointer) (*LFSMetaObject, error) {
	var err error

	ctx, committer, err := db.TxContext(ctx)
	if err != nil {
		return nil, err
	}
	defer committer.Close()

	// Fast path: the object is already associated with this repository.
	m, exist, err := db.Get[LFSMetaObject](ctx, builder.Eq{"repository_id": repoID, "oid": p.Oid})
	if err != nil {
		return nil, err
	} else if exist {
		m.Existing = true
		return m, committer.Commit()
	}

	m = &LFSMetaObject{Pointer: p, RepositoryID: repoID}
	if err = db.Insert(ctx, m); err != nil {
		return nil, err
	}

	return m, committer.Commit()
}
+
+// GetLFSMetaObjectByOid selects a LFSMetaObject entry from database by its OID.
+// It may return ErrLFSObjectNotExist or a database error. If the error is nil,
+// the returned pointer is a valid LFSMetaObject.
+func GetLFSMetaObjectByOid(ctx context.Context, repoID int64, oid string) (*LFSMetaObject, error) {
+ if len(oid) == 0 {
+ return nil, ErrLFSObjectNotExist
+ }
+
+ m := &LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}, RepositoryID: repoID}
+ has, err := db.GetEngine(ctx).Get(m)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrLFSObjectNotExist
+ }
+ return m, nil
+}
+
// RemoveLFSMetaObjectByOid removes a LFSMetaObject entry from database by its OID.
// It may return ErrLFSObjectNotExist or a database error.
// It is a convenience wrapper around RemoveLFSMetaObjectByOidFn with no callback.
func RemoveLFSMetaObjectByOid(ctx context.Context, repoID int64, oid string) (int64, error) {
	return RemoveLFSMetaObjectByOidFn(ctx, repoID, oid, nil)
}
+
// RemoveLFSMetaObjectByOidFn removes a LFSMetaObject entry from database by its OID.
// It may return ErrLFSObjectNotExist or a database error. It will run fn with the
// current count within the transaction. The returned count is the number of
// rows (across all repositories) that still reference the OID after deletion,
// letting callers decide whether the underlying content can be pruned.
func RemoveLFSMetaObjectByOidFn(ctx context.Context, repoID int64, oid string, fn func(count int64) error) (int64, error) {
	if len(oid) == 0 {
		return 0, ErrLFSObjectNotExist
	}

	// Delete, recount and callback all run inside one transaction.
	ctx, committer, err := db.TxContext(ctx)
	if err != nil {
		return 0, err
	}
	defer committer.Close()

	m := &LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}, RepositoryID: repoID}
	if _, err := db.DeleteByBean(ctx, m); err != nil {
		return -1, err
	}

	// Count remaining references to this OID in any repository.
	count, err := db.CountByBean(ctx, &LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}})
	if err != nil {
		return count, err
	}

	if fn != nil {
		// An error from fn aborts the transaction (commit is skipped).
		if err := fn(count); err != nil {
			return count, err
		}
	}

	return count, committer.Commit()
}
+
+// GetLFSMetaObjects returns all LFSMetaObjects associated with a repository
+func GetLFSMetaObjects(ctx context.Context, repoID int64, page, pageSize int) ([]*LFSMetaObject, error) {
+ sess := db.GetEngine(ctx)
+
+ if page >= 0 && pageSize > 0 {
+ start := 0
+ if page > 0 {
+ start = (page - 1) * pageSize
+ }
+ sess.Limit(pageSize, start)
+ }
+ lfsObjects := make([]*LFSMetaObject, 0, pageSize)
+ return lfsObjects, sess.Find(&lfsObjects, &LFSMetaObject{RepositoryID: repoID})
+}
+
// CountLFSMetaObjects returns a count of all LFSMetaObjects associated with a repository.
func CountLFSMetaObjects(ctx context.Context, repoID int64) (int64, error) {
	return db.GetEngine(ctx).Count(&LFSMetaObject{RepositoryID: repoID})
}
+
// LFSObjectAccessible checks if a provided Oid is accessible to the user.
// Admins can see every object; other users only objects that live in a
// repository they can access.
func LFSObjectAccessible(ctx context.Context, user *user_model.User, oid string) (bool, error) {
	if user.IsAdmin {
		count, err := db.GetEngine(ctx).Count(&LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}})
		return count > 0, err
	}
	// Join against repository and apply the standard visibility condition.
	cond := repo_model.AccessibleRepositoryCondition(user, unit.TypeInvalid)
	count, err := db.GetEngine(ctx).Where(cond).Join("INNER", "repository", "`lfs_meta_object`.repository_id = `repository`.id").Count(&LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}})
	return count > 0, err
}
+
// ExistsLFSObject checks if a provided Oid exists within the DB,
// regardless of which repository it belongs to or who can access it.
func ExistsLFSObject(ctx context.Context, oid string) (bool, error) {
	return db.GetEngine(ctx).Exist(&LFSMetaObject{Pointer: lfs.Pointer{Oid: oid}})
}
+
+// LFSAutoAssociate auto associates accessible LFSMetaObjects
+func LFSAutoAssociate(ctx context.Context, metas []*LFSMetaObject, user *user_model.User, repoID int64) error {
+ ctx, committer, err := db.TxContext(ctx)
+ if err != nil {
+ return err
+ }
+ defer committer.Close()
+
+ sess := db.GetEngine(ctx)
+
+ oids := make([]any, len(metas))
+ oidMap := make(map[string]*LFSMetaObject, len(metas))
+ for i, meta := range metas {
+ oids[i] = meta.Oid
+ oidMap[meta.Oid] = meta
+ }
+
+ if !user.IsAdmin {
+ newMetas := make([]*LFSMetaObject, 0, len(metas))
+ cond := builder.In(
+ "`lfs_meta_object`.repository_id",
+ builder.Select("`repository`.id").From("repository").Where(repo_model.AccessibleRepositoryCondition(user, unit.TypeInvalid)),
+ )
+ err = sess.Cols("oid").Where(cond).In("oid", oids...).GroupBy("oid").Find(&newMetas)
+ if err != nil {
+ return err
+ }
+ if len(newMetas) != len(oidMap) {
+ return fmt.Errorf("unable collect all LFS objects from database, expected %d, actually %d", len(oidMap), len(newMetas))
+ }
+ for i := range newMetas {
+ newMetas[i].Size = oidMap[newMetas[i].Oid].Size
+ newMetas[i].RepositoryID = repoID
+ }
+ if err = db.Insert(ctx, newMetas); err != nil {
+ return err
+ }
+ } else {
+ // admin can associate any LFS object to any repository, and we do not care about errors (eg: duplicated unique key),
+ // even if error occurs, it won't hurt users and won't make things worse
+ for i := range metas {
+ p := lfs.Pointer{Oid: metas[i].Oid, Size: metas[i].Size}
+ _, err = sess.Insert(&LFSMetaObject{
+ Pointer: p,
+ RepositoryID: repoID,
+ })
+ if err != nil {
+ log.Warn("failed to insert LFS meta object %-v for repo_id: %d into database, err=%v", p, repoID, err)
+ }
+ }
+ }
+ return committer.Commit()
+}
+
+// CopyLFS copies LFS data from one repo to another
+func CopyLFS(ctx context.Context, newRepo, oldRepo *repo_model.Repository) error {
+ var lfsObjects []*LFSMetaObject
+ if err := db.GetEngine(ctx).Where("repository_id=?", oldRepo.ID).Find(&lfsObjects); err != nil {
+ return err
+ }
+
+ for _, v := range lfsObjects {
+ v.ID = 0
+ v.RepositoryID = newRepo.ID
+ if err := db.Insert(ctx, v); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// GetRepoLFSSize return a repository's lfs files size
+func GetRepoLFSSize(ctx context.Context, repoID int64) (int64, error) {
+ lfsSize, err := db.GetEngine(ctx).Where("repository_id = ?", repoID).SumInt(new(LFSMetaObject), "size")
+ if err != nil {
+ return 0, fmt.Errorf("updateSize: GetLFSMetaObjects: %w", err)
+ }
+ return lfsSize, nil
+}
+
// IterateRepositoryIDsWithLFSMetaObjects iterates across the repositories that have LFSMetaObjects,
// calling f with each repository ID and the number of meta objects it holds.
func IterateRepositoryIDsWithLFSMetaObjects(ctx context.Context, f func(ctx context.Context, repoID, count int64) error) error {
	batchSize := setting.Database.IterateBufferSize
	sess := db.GetEngine(ctx)
	var start int
	// One aggregated row per repository.
	type RepositoryCount struct {
		RepositoryID int64
		Count int64
	}
	for {
		counts := make([]*RepositoryCount, 0, batchSize)
		if err := sess.Select("repository_id, COUNT(id) AS count").
			Table("lfs_meta_object").
			GroupBy("repository_id").
			OrderBy("repository_id ASC").Limit(batchSize, start).Find(&counts); err != nil {
			return err
		}
		if len(counts) == 0 {
			// No further rows: iteration complete.
			return nil
		}
		// Offset-based paging. NOTE(review): if f removes meta objects while
		// iterating, later pages may shift — confirm callers tolerate this.
		start += len(counts)

		for _, count := range counts {
			if err := f(ctx, count.RepositoryID, count.Count); err != nil {
				return err
			}
		}
	}
}
+
// IterateLFSMetaObjectsForRepoOptions provides options for IterateLFSMetaObjectsForRepo
type IterateLFSMetaObjectsForRepoOptions struct {
	// OlderThan, when non-zero, restricts iteration to objects created before this time.
	OlderThan timeutil.TimeStamp
	// UpdatedLessRecentlyThan, when non-zero, restricts iteration to objects
	// last updated before this time.
	UpdatedLessRecentlyThan timeutil.TimeStamp
}
+
// IterateLFSMetaObjectsForRepo provides a iterator for LFSMetaObjects per Repo,
// calling f for every matching meta object in ascending ID order.
func IterateLFSMetaObjectsForRepo(ctx context.Context, repoID int64, f func(context.Context, *LFSMetaObject) error, opts *IterateLFSMetaObjectsForRepoOptions) error {
	batchSize := setting.Database.IterateBufferSize
	engine := db.GetEngine(ctx)
	// Keyset pagination: id tracks the last ID handed to f, so rows deleted or
	// updated by f do not shift subsequent batches (unlike an offset).
	id := int64(0)

	for {
		beans := make([]*LFSMetaObject, 0, batchSize)
		// NOTE(review): the self-join on oid plus GROUP BY id appears intended to
		// deduplicate rows sharing an OID — confirm before simplifying the query.
		sess := engine.Table("lfs_meta_object").Select("`lfs_meta_object`.*").
			Join("INNER", "`lfs_meta_object` AS l1", "`lfs_meta_object`.oid = `l1`.oid").
			Where("`lfs_meta_object`.repository_id = ?", repoID)
		if !opts.OlderThan.IsZero() {
			sess.And("`lfs_meta_object`.created_unix < ?", opts.OlderThan)
		}
		if !opts.UpdatedLessRecentlyThan.IsZero() {
			sess.And("`lfs_meta_object`.updated_unix < ?", opts.UpdatedLessRecentlyThan)
		}
		sess.GroupBy("`lfs_meta_object`.id").
			And("`lfs_meta_object`.id > ?", id).
			OrderBy("`lfs_meta_object`.id ASC")

		// Always fetch from offset 0: the "id > ?" condition advances the window.
		if err := sess.Limit(batchSize, 0).Find(&beans); err != nil {
			return err
		}
		if len(beans) == 0 {
			return nil
		}

		for _, bean := range beans {
			if err := f(ctx, bean); err != nil {
				return err
			}
		}
		id = beans[len(beans)-1].ID
	}
}
+
+// MarkLFSMetaObject updates the updated time for the provided LFSMetaObject
+func MarkLFSMetaObject(ctx context.Context, id int64) error {
+ obj := &LFSMetaObject{
+ UpdatedUnix: timeutil.TimeStampNow(),
+ }
+ count, err := db.GetEngine(ctx).ID(id).Update(obj)
+ if count != 1 {
+ log.Error("Unexpectedly updated %d LFSMetaObjects with ID: %d", count, id)
+ }
+ return err
+}
diff --git a/models/git/lfs_lock.go b/models/git/lfs_lock.go
new file mode 100644
index 00000000..07ce7d4a
--- /dev/null
+++ b/models/git/lfs_lock.go
@@ -0,0 +1,209 @@
+// Copyright 2017 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/perm"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/util"
+)
+
// LFSLock represents a git lfs lock of repository.
// A lock marks a path as being edited by a user so concurrent modification
// of files tracked by LFS can be coordinated.
type LFSLock struct {
	ID      int64            `xorm:"pk autoincr"`
	RepoID  int64            `xorm:"INDEX NOT NULL"`
	OwnerID int64            `xorm:"INDEX NOT NULL"`
	// Owner is loaded on demand via LoadOwner; it is not a database column.
	Owner   *user_model.User `xorm:"-"`
	Path    string           `xorm:"TEXT"` // repo-relative path, normalized by BeforeInsert
	Created time.Time        `xorm:"created"`
}
+
// init registers the LFSLock model so XORM can sync and query its table.
func init() {
	db.RegisterModel(new(LFSLock))
}
+
// BeforeInsert is invoked from XORM before inserting an object of this type.
// It normalizes Path to a clean, repository-relative path.
func (l *LFSLock) BeforeInsert() {
	l.Path = util.PathJoinRel(l.Path)
}
+
+// LoadAttributes loads attributes of the lock.
+func (l *LFSLock) LoadAttributes(ctx context.Context) error {
+ // Load owner
+ if err := l.LoadOwner(ctx); err != nil {
+ return fmt.Errorf("load owner: %w", err)
+ }
+
+ return nil
+}
+
+// LoadOwner loads owner of the lock.
+func (l *LFSLock) LoadOwner(ctx context.Context) error {
+ if l.Owner != nil {
+ return nil
+ }
+
+ owner, err := user_model.GetUserByID(ctx, l.OwnerID)
+ if err != nil {
+ if user_model.IsErrUserNotExist(err) {
+ l.Owner = user_model.NewGhostUser()
+ return nil
+ }
+ return err
+ }
+ l.Owner = owner
+
+ return nil
+}
+
// CreateLFSLock creates a new lock.
// The lock owner must have write access to the repository. If a lock already
// exists for the (normalized) path, the existing lock is returned together
// with ErrLFSLockAlreadyExist.
func CreateLFSLock(ctx context.Context, repo *repo_model.Repository, lock *LFSLock) (*LFSLock, error) {
	dbCtx, committer, err := db.TxContext(ctx)
	if err != nil {
		return nil, err
	}
	defer committer.Close() // rolls back unless Commit succeeded

	if err := CheckLFSAccessForRepo(dbCtx, lock.OwnerID, repo, perm.AccessModeWrite); err != nil {
		return nil, err
	}

	// Normalize the path the same way BeforeInsert would, so the duplicate
	// check below sees the canonical form.
	lock.Path = util.PathJoinRel(lock.Path)
	lock.RepoID = repo.ID

	l, err := GetLFSLock(dbCtx, repo, lock.Path)
	if err == nil {
		// A lock for this path already exists: return it with a sentinel error.
		return l, ErrLFSLockAlreadyExist{lock.RepoID, lock.Path}
	}
	if !IsErrLFSLockNotExist(err) {
		return nil, err
	}

	if err := db.Insert(dbCtx, lock); err != nil {
		return nil, err
	}

	return lock, committer.Commit()
}
+
+// GetLFSLock returns release by given path.
+func GetLFSLock(ctx context.Context, repo *repo_model.Repository, path string) (*LFSLock, error) {
+ path = util.PathJoinRel(path)
+ rel := &LFSLock{RepoID: repo.ID}
+ has, err := db.GetEngine(ctx).Where("lower(path) = ?", strings.ToLower(path)).Get(rel)
+ if err != nil {
+ return nil, err
+ }
+ if !has {
+ return nil, ErrLFSLockNotExist{0, repo.ID, path}
+ }
+ return rel, nil
+}
+
+// GetLFSLockByID returns release by given id.
+func GetLFSLockByID(ctx context.Context, id int64) (*LFSLock, error) {
+ lock := new(LFSLock)
+ has, err := db.GetEngine(ctx).ID(id).Get(lock)
+ if err != nil {
+ return nil, err
+ } else if !has {
+ return nil, ErrLFSLockNotExist{id, 0, ""}
+ }
+ return lock, nil
+}
+
+// GetLFSLockByRepoID returns a list of locks of repository.
+func GetLFSLockByRepoID(ctx context.Context, repoID int64, page, pageSize int) (LFSLockList, error) {
+ e := db.GetEngine(ctx)
+ if page >= 0 && pageSize > 0 {
+ start := 0
+ if page > 0 {
+ start = (page - 1) * pageSize
+ }
+ e.Limit(pageSize, start)
+ }
+ lfsLocks := make(LFSLockList, 0, pageSize)
+ return lfsLocks, e.Find(&lfsLocks, &LFSLock{RepoID: repoID})
+}
+
+// GetTreePathLock returns LSF lock for the treePath
+func GetTreePathLock(ctx context.Context, repoID int64, treePath string) (*LFSLock, error) {
+ if !setting.LFS.StartServer {
+ return nil, nil
+ }
+
+ locks, err := GetLFSLockByRepoID(ctx, repoID, 0, 0)
+ if err != nil {
+ return nil, err
+ }
+ for _, lock := range locks {
+ if lock.Path == treePath {
+ return lock, nil
+ }
+ }
+ return nil, nil
+}
+
// CountLFSLockByRepoID returns a count of all LFSLocks associated with a repository.
func CountLFSLockByRepoID(ctx context.Context, repoID int64) (int64, error) {
	return db.GetEngine(ctx).Count(&LFSLock{RepoID: repoID})
}
+
// DeleteLFSLockByID deletes a lock by given ID.
// The acting user u needs write access to the repository; unless force is set,
// only the lock's owner may delete it. The deleted lock is returned on success.
func DeleteLFSLockByID(ctx context.Context, id int64, repo *repo_model.Repository, u *user_model.User, force bool) (*LFSLock, error) {
	dbCtx, committer, err := db.TxContext(ctx)
	if err != nil {
		return nil, err
	}
	defer committer.Close() // rolls back unless Commit succeeded

	lock, err := GetLFSLockByID(dbCtx, id)
	if err != nil {
		return nil, err
	}

	if err := CheckLFSAccessForRepo(dbCtx, u.ID, repo, perm.AccessModeWrite); err != nil {
		return nil, err
	}

	// Deleting someone else's lock requires the force flag.
	if !force && u.ID != lock.OwnerID {
		return nil, errors.New("user doesn't own lock and force flag is not set")
	}

	if _, err := db.GetEngine(dbCtx).ID(id).Delete(new(LFSLock)); err != nil {
		return nil, err
	}

	return lock, committer.Commit()
}
+
+// CheckLFSAccessForRepo check needed access mode base on action
+func CheckLFSAccessForRepo(ctx context.Context, ownerID int64, repo *repo_model.Repository, mode perm.AccessMode) error {
+ if ownerID == 0 {
+ return ErrLFSUnauthorizedAction{repo.ID, "undefined", mode}
+ }
+ u, err := user_model.GetUserByID(ctx, ownerID)
+ if err != nil {
+ return err
+ }
+ perm, err := access_model.GetUserRepoPermission(ctx, repo, u)
+ if err != nil {
+ return err
+ }
+ if !perm.CanAccess(mode, unit.TypeCode) {
+ return ErrLFSUnauthorizedAction{repo.ID, u.DisplayName(), mode}
+ }
+ return nil
+}
diff --git a/models/git/lfs_lock_list.go b/models/git/lfs_lock_list.go
new file mode 100644
index 00000000..cab1e61c
--- /dev/null
+++ b/models/git/lfs_lock_list.go
@@ -0,0 +1,54 @@
+// Copyright 2024 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git
+
+import (
+ "context"
+ "fmt"
+
+ "code.gitea.io/gitea/models/db"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/container"
+)
+
// LFSLockList is a list of LFSLock; it exists to batch-load shared attributes.
type LFSLockList []*LFSLock
+
+// LoadAttributes loads the attributes for the given locks
+func (locks LFSLockList) LoadAttributes(ctx context.Context) error {
+ if len(locks) == 0 {
+ return nil
+ }
+
+ if err := locks.LoadOwner(ctx); err != nil {
+ return fmt.Errorf("load owner: %w", err)
+ }
+
+ return nil
+}
+
+// LoadOwner loads the owner of the locks
+func (locks LFSLockList) LoadOwner(ctx context.Context) error {
+ if len(locks) == 0 {
+ return nil
+ }
+
+ usersIDs := container.FilterSlice(locks, func(lock *LFSLock) (int64, bool) {
+ return lock.OwnerID, true
+ })
+ users := make(map[int64]*user_model.User, len(usersIDs))
+ if err := db.GetEngine(ctx).
+ In("id", usersIDs).
+ Find(&users); err != nil {
+ return fmt.Errorf("find users: %w", err)
+ }
+ for _, v := range locks {
+ v.Owner = users[v.OwnerID]
+ if v.Owner == nil { // not exist
+ v.Owner = user_model.NewGhostUser()
+ }
+ }
+
+ return nil
+}
diff --git a/models/git/lfs_test.go b/models/git/lfs_test.go
new file mode 100644
index 00000000..afb73ecf
--- /dev/null
+++ b/models/git/lfs_test.go
@@ -0,0 +1,102 @@
+// Copyright 2024 The Forgejo Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git
+
+import (
+ "context"
+ "path/filepath"
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/unittest"
+ "code.gitea.io/gitea/modules/setting"
+ "code.gitea.io/gitea/modules/test"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
// TestIterateRepositoryIDsWithLFSMetaObjects checks that each repository with
// LFS meta objects is reported exactly once with its object count, regardless
// of the iteration batch size.
func TestIterateRepositoryIDsWithLFSMetaObjects(t *testing.T) {
	// Extend the standard fixtures with an extra lfs_meta_object row.
	defer unittest.OverrideFixtures(
		unittest.FixturesOptions{
			Dir:  filepath.Join(setting.AppWorkPath, "models/fixtures/"),
			Base: setting.AppWorkPath,
			Dirs: []string{"models/git/TestIterateRepositoryIDsWithLFSMetaObjects/"},
		},
	)()
	require.NoError(t, unittest.PrepareTestDatabase())

	type repocount struct {
		repoid int64
		count  int64
	}
	// Expected aggregation over the fixtures: repo 1 holds 1 object, repo 54 holds 4.
	expected := []repocount{{1, 1}, {54, 4}}

	t.Run("Normal batch size", func(t *testing.T) {
		defer test.MockVariableValue(&setting.Database.IterateBufferSize, 20)()
		cases := []repocount{}

		err := IterateRepositoryIDsWithLFSMetaObjects(db.DefaultContext, func(ctx context.Context, repoID, count int64) error {
			cases = append(cases, repocount{repoID, count})
			return nil
		})
		require.NoError(t, err)
		assert.EqualValues(t, expected, cases)
	})

	// A batch size of 1 forces multiple pagination rounds through the same data.
	t.Run("Low batch size", func(t *testing.T) {
		defer test.MockVariableValue(&setting.Database.IterateBufferSize, 1)()
		cases := []repocount{}

		err := IterateRepositoryIDsWithLFSMetaObjects(db.DefaultContext, func(ctx context.Context, repoID, count int64) error {
			cases = append(cases, repocount{repoID, count})
			return nil
		})
		require.NoError(t, err)
		assert.EqualValues(t, expected, cases)
	})
}
+
// TestIterateLFSMetaObjectsForRepo checks that iteration over repository 54
// visits all of its meta objects in ID order for differing batch sizes, and
// that deleting rows from inside the callback does not derail the iteration.
func TestIterateLFSMetaObjectsForRepo(t *testing.T) {
	require.NoError(t, unittest.PrepareTestDatabase())

	expectedIDs := []int64{1, 2, 3, 4}

	t.Run("Normal batch size", func(t *testing.T) {
		defer test.MockVariableValue(&setting.Database.IterateBufferSize, 20)()
		actualIDs := []int64{}

		err := IterateLFSMetaObjectsForRepo(db.DefaultContext, 54, func(ctx context.Context, lo *LFSMetaObject) error {
			actualIDs = append(actualIDs, lo.ID)
			return nil
		}, &IterateLFSMetaObjectsForRepoOptions{})
		require.NoError(t, err)
		assert.EqualValues(t, expectedIDs, actualIDs)
	})

	// Batch size 1 forces one query per row.
	t.Run("Low batch size", func(t *testing.T) {
		defer test.MockVariableValue(&setting.Database.IterateBufferSize, 1)()
		actualIDs := []int64{}

		err := IterateLFSMetaObjectsForRepo(db.DefaultContext, 54, func(ctx context.Context, lo *LFSMetaObject) error {
			actualIDs = append(actualIDs, lo.ID)
			return nil
		}, &IterateLFSMetaObjectsForRepoOptions{})
		require.NoError(t, err)
		assert.EqualValues(t, expectedIDs, actualIDs)

		// Deleting each visited row must not skip the remaining rows: the
		// iterator pages by "id > last" rather than by offset.
		t.Run("Batch handles updates", func(t *testing.T) {
			actualIDs := []int64{}

			err := IterateLFSMetaObjectsForRepo(db.DefaultContext, 54, func(ctx context.Context, lo *LFSMetaObject) error {
				actualIDs = append(actualIDs, lo.ID)
				_, err := db.DeleteByID[LFSMetaObject](ctx, lo.ID)
				require.NoError(t, err)
				return nil
			}, &IterateLFSMetaObjectsForRepoOptions{})
			require.NoError(t, err)
			assert.EqualValues(t, expectedIDs, actualIDs)
		})
	})
}
diff --git a/models/git/main_test.go b/models/git/main_test.go
new file mode 100644
index 00000000..aab1fa9a
--- /dev/null
+++ b/models/git/main_test.go
@@ -0,0 +1,18 @@
+// Copyright 2020 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/unittest"
+
+ _ "code.gitea.io/gitea/models"
+ _ "code.gitea.io/gitea/models/actions"
+ _ "code.gitea.io/gitea/models/activities"
+)
+
// TestMain bootstraps the shared unit-test environment (database, fixtures)
// for all tests in this package.
func TestMain(m *testing.M) {
	unittest.MainTest(m)
}
diff --git a/models/git/protected_banch_list_test.go b/models/git/protected_banch_list_test.go
new file mode 100644
index 00000000..09319d21
--- /dev/null
+++ b/models/git/protected_banch_list_test.go
@@ -0,0 +1,77 @@
+// Copyright 2023 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestBranchRuleMatchPriority(t *testing.T) {
+ kases := []struct {
+ Rules []string
+ BranchName string
+ ExpectedMatchIdx int
+ }{
+ {
+ Rules: []string{"release/*", "release/v1.17"},
+ BranchName: "release/v1.17",
+ ExpectedMatchIdx: 1,
+ },
+ {
+ Rules: []string{"release/v1.17", "release/*"},
+ BranchName: "release/v1.17",
+ ExpectedMatchIdx: 0,
+ },
+ {
+ Rules: []string{"release/**/v1.17", "release/test/v1.17"},
+ BranchName: "release/test/v1.17",
+ ExpectedMatchIdx: 1,
+ },
+ {
+ Rules: []string{"release/test/v1.17", "release/**/v1.17"},
+ BranchName: "release/test/v1.17",
+ ExpectedMatchIdx: 0,
+ },
+ {
+ Rules: []string{"release/**", "release/v1.0.0"},
+ BranchName: "release/v1.0.0",
+ ExpectedMatchIdx: 1,
+ },
+ {
+ Rules: []string{"release/v1.0.0", "release/**"},
+ BranchName: "release/v1.0.0",
+ ExpectedMatchIdx: 0,
+ },
+ {
+ Rules: []string{"release/**", "release/v1.0.0"},
+ BranchName: "release/v2.0.0",
+ ExpectedMatchIdx: 0,
+ },
+ {
+ Rules: []string{"release/*", "release/v1.0.0"},
+ BranchName: "release/1/v2.0.0",
+ ExpectedMatchIdx: -1,
+ },
+ }
+
+ for _, kase := range kases {
+ var pbs ProtectedBranchRules
+ for _, rule := range kase.Rules {
+ pbs = append(pbs, &ProtectedBranch{RuleName: rule})
+ }
+ pbs.sort()
+ matchedPB := pbs.GetFirstMatched(kase.BranchName)
+ if matchedPB == nil {
+ if kase.ExpectedMatchIdx >= 0 {
+ require.Error(t, fmt.Errorf("no matched rules but expected %s[%d]", kase.Rules[kase.ExpectedMatchIdx], kase.ExpectedMatchIdx))
+ }
+ } else {
+ assert.EqualValues(t, kase.Rules[kase.ExpectedMatchIdx], matchedPB.RuleName)
+ }
+ }
+}
diff --git a/models/git/protected_branch.go b/models/git/protected_branch.go
new file mode 100644
index 00000000..a8b8c81b
--- /dev/null
+++ b/models/git/protected_branch.go
@@ -0,0 +1,511 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "slices"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/organization"
+ "code.gitea.io/gitea/models/perm"
+ access_model "code.gitea.io/gitea/models/perm/access"
+ repo_model "code.gitea.io/gitea/models/repo"
+ "code.gitea.io/gitea/models/unit"
+ user_model "code.gitea.io/gitea/models/user"
+ "code.gitea.io/gitea/modules/log"
+ "code.gitea.io/gitea/modules/timeutil"
+ "code.gitea.io/gitea/modules/util"
+
+ "github.com/gobwas/glob"
+ "github.com/gobwas/glob/syntax"
+ "xorm.io/builder"
+)
+
+var ErrBranchIsProtected = errors.New("branch is protected")
+
// ProtectedBranch struct represents one branch-protection rule of a repository.
type ProtectedBranch struct {
	ID       int64                  `xorm:"pk autoincr"`
	RepoID   int64                  `xorm:"UNIQUE(s)"`
	Repo     *repo_model.Repository `xorm:"-"`
	RuleName string                 `xorm:"'branch_name' UNIQUE(s)"` // a branch name or a glob match to branch name
	// globRule/isPlainName are lazily derived from RuleName by loadGlob.
	globRule    glob.Glob `xorm:"-"`
	isPlainName bool      `xorm:"-"`

	// Push restrictions.
	CanPush          bool `xorm:"NOT NULL DEFAULT false"`
	EnableWhitelist  bool
	WhitelistUserIDs []int64 `xorm:"JSON TEXT"`
	WhitelistTeamIDs []int64 `xorm:"JSON TEXT"`

	// Merge restrictions.
	EnableMergeWhitelist  bool    `xorm:"NOT NULL DEFAULT false"`
	WhitelistDeployKeys   bool    `xorm:"NOT NULL DEFAULT false"`
	MergeWhitelistUserIDs []int64 `xorm:"JSON TEXT"`
	MergeWhitelistTeamIDs []int64 `xorm:"JSON TEXT"`

	// Status checks and review requirements.
	EnableStatusCheck             bool     `xorm:"NOT NULL DEFAULT false"`
	StatusCheckContexts           []string `xorm:"JSON TEXT"`
	EnableApprovalsWhitelist      bool     `xorm:"NOT NULL DEFAULT false"`
	ApprovalsWhitelistUserIDs     []int64  `xorm:"JSON TEXT"`
	ApprovalsWhitelistTeamIDs     []int64  `xorm:"JSON TEXT"`
	RequiredApprovals             int64    `xorm:"NOT NULL DEFAULT 0"`
	BlockOnRejectedReviews        bool     `xorm:"NOT NULL DEFAULT false"`
	BlockOnOfficialReviewRequests bool     `xorm:"NOT NULL DEFAULT false"`
	BlockOnOutdatedBranch         bool     `xorm:"NOT NULL DEFAULT false"`
	DismissStaleApprovals         bool     `xorm:"NOT NULL DEFAULT false"`
	IgnoreStaleApprovals          bool     `xorm:"NOT NULL DEFAULT false"`
	RequireSignedCommits          bool     `xorm:"NOT NULL DEFAULT false"`

	// Semicolon-separated glob lists, see getFilePatterns.
	ProtectedFilePatterns   string `xorm:"TEXT"`
	UnprotectedFilePatterns string `xorm:"TEXT"`
	ApplyToAdmins           bool   `xorm:"NOT NULL DEFAULT false"`

	CreatedUnix timeutil.TimeStamp `xorm:"created"`
	UpdatedUnix timeutil.TimeStamp `xorm:"updated"`
}
+
// init registers the ProtectedBranch model so XORM can sync and query its table.
func init() {
	db.RegisterModel(new(ProtectedBranch))
}
+
+// IsRuleNameSpecial return true if it contains special character
+func IsRuleNameSpecial(ruleName string) bool {
+ for i := 0; i < len(ruleName); i++ {
+ if syntax.Special(ruleName[i]) {
+ return true
+ }
+ }
+ return false
+}
+
// loadGlob lazily compiles RuleName into a glob matcher.
// On an invalid pattern it logs a warning and falls back to a quoted literal
// match, so a broken rule can never accidentally match everything.
func (protectBranch *ProtectedBranch) loadGlob() {
	if protectBranch.globRule == nil {
		var err error
		protectBranch.globRule, err = glob.Compile(protectBranch.RuleName, '/')
		if err != nil {
			log.Warn("Invalid glob rule for ProtectedBranch[%d]: %s %v", protectBranch.ID, protectBranch.RuleName, err)
			protectBranch.globRule = glob.MustCompile(glob.QuoteMeta(protectBranch.RuleName), '/')
		}
		// Plain names (no glob metacharacters) use string comparison in Match.
		protectBranch.isPlainName = !IsRuleNameSpecial(protectBranch.RuleName)
	}
}
+
+// Match tests if branchName matches the rule
+func (protectBranch *ProtectedBranch) Match(branchName string) bool {
+ protectBranch.loadGlob()
+ if protectBranch.isPlainName {
+ return strings.EqualFold(protectBranch.RuleName, branchName)
+ }
+
+ return protectBranch.globRule.Match(branchName)
+}
+
+func (protectBranch *ProtectedBranch) LoadRepo(ctx context.Context) (err error) {
+ if protectBranch.Repo != nil {
+ return nil
+ }
+ protectBranch.Repo, err = repo_model.GetRepositoryByID(ctx, protectBranch.RepoID)
+ return err
+}
+
// CanUserPush returns if some user could push to this protected branch.
// Check order: pushing must be enabled at all; without a whitelist any write
// access to the code unit suffices; otherwise the user must be whitelisted
// directly or via a whitelisted team. Errors are logged and treated as "no".
func (protectBranch *ProtectedBranch) CanUserPush(ctx context.Context, user *user_model.User) bool {
	if !protectBranch.CanPush {
		return false
	}

	if !protectBranch.EnableWhitelist {
		if err := protectBranch.LoadRepo(ctx); err != nil {
			log.Error("LoadRepo: %v", err)
			return false
		}

		// No whitelist: fall back to plain write permission on the code unit.
		writeAccess, err := access_model.HasAccessUnit(ctx, user, protectBranch.Repo, unit.TypeCode, perm.AccessModeWrite)
		if err != nil {
			log.Error("HasAccessUnit: %v", err)
			return false
		}
		return writeAccess
	}

	// Direct user whitelist entry wins.
	if slices.Contains(protectBranch.WhitelistUserIDs, user.ID) {
		return true
	}

	if len(protectBranch.WhitelistTeamIDs) == 0 {
		return false
	}

	// Otherwise membership in any whitelisted team grants push.
	in, err := organization.IsUserInTeams(ctx, user.ID, protectBranch.WhitelistTeamIDs)
	if err != nil {
		log.Error("IsUserInTeams: %v", err)
		return false
	}
	return in
}
+
+// IsUserMergeWhitelisted checks if some user is whitelisted to merge to this branch
+func IsUserMergeWhitelisted(ctx context.Context, protectBranch *ProtectedBranch, userID int64, permissionInRepo access_model.Permission) bool {
+ if !protectBranch.EnableMergeWhitelist {
+ // Then we need to fall back on whether the user has write permission
+ return permissionInRepo.CanWrite(unit.TypeCode)
+ }
+
+ if slices.Contains(protectBranch.MergeWhitelistUserIDs, userID) {
+ return true
+ }
+
+ if len(protectBranch.MergeWhitelistTeamIDs) == 0 {
+ return false
+ }
+
+ in, err := organization.IsUserInTeams(ctx, userID, protectBranch.MergeWhitelistTeamIDs)
+ if err != nil {
+ log.Error("IsUserInTeams: %v", err)
+ return false
+ }
+ return in
+}
+
+// IsUserOfficialReviewer check if user is official reviewer for the branch (counts towards required approvals)
+func IsUserOfficialReviewer(ctx context.Context, protectBranch *ProtectedBranch, user *user_model.User) (bool, error) {
+ repo, err := repo_model.GetRepositoryByID(ctx, protectBranch.RepoID)
+ if err != nil {
+ return false, err
+ }
+
+ if !protectBranch.EnableApprovalsWhitelist {
+ // Anyone with write access is considered official reviewer
+ writeAccess, err := access_model.HasAccessUnit(ctx, user, repo, unit.TypeCode, perm.AccessModeWrite)
+ if err != nil {
+ return false, err
+ }
+ return writeAccess, nil
+ }
+
+ if slices.Contains(protectBranch.ApprovalsWhitelistUserIDs, user.ID) {
+ return true, nil
+ }
+
+ inTeam, err := organization.IsUserInTeams(ctx, user.ID, protectBranch.ApprovalsWhitelistTeamIDs)
+ if err != nil {
+ return false, err
+ }
+
+ return inTeam, nil
+}
+
// GetProtectedFilePatterns parses a semicolon separated list of protected file patterns and returns a glob.Glob slice
func (protectBranch *ProtectedBranch) GetProtectedFilePatterns() []glob.Glob {
	return getFilePatterns(protectBranch.ProtectedFilePatterns)
}
+
// GetUnprotectedFilePatterns parses a semicolon separated list of unprotected file patterns and returns a glob.Glob slice
func (protectBranch *ProtectedBranch) GetUnprotectedFilePatterns() []glob.Glob {
	return getFilePatterns(protectBranch.UnprotectedFilePatterns)
}
+
+func getFilePatterns(filePatterns string) []glob.Glob {
+ extarr := make([]glob.Glob, 0, 10)
+ for _, expr := range strings.Split(strings.ToLower(filePatterns), ";") {
+ expr = strings.TrimSpace(expr)
+ if expr != "" {
+ if g, err := glob.Compile(expr, '.', '/'); err != nil {
+ log.Info("Invalid glob expression '%s' (skipped): %v", expr, err)
+ } else {
+ extarr = append(extarr, g)
+ }
+ }
+ }
+ return extarr
+}
+
+// MergeBlockedByProtectedFiles returns true if merge is blocked by protected files change
+func (protectBranch *ProtectedBranch) MergeBlockedByProtectedFiles(changedProtectedFiles []string) bool {
+ glob := protectBranch.GetProtectedFilePatterns()
+ if len(glob) == 0 {
+ return false
+ }
+
+ return len(changedProtectedFiles) > 0
+}
+
+// IsProtectedFile return if path is protected
+func (protectBranch *ProtectedBranch) IsProtectedFile(patterns []glob.Glob, path string) bool {
+ if len(patterns) == 0 {
+ patterns = protectBranch.GetProtectedFilePatterns()
+ if len(patterns) == 0 {
+ return false
+ }
+ }
+
+ lpath := strings.ToLower(strings.TrimSpace(path))
+
+ r := false
+ for _, pat := range patterns {
+ if pat.Match(lpath) {
+ r = true
+ break
+ }
+ }
+
+ return r
+}
+
+// IsUnprotectedFile return if path is unprotected
+func (protectBranch *ProtectedBranch) IsUnprotectedFile(patterns []glob.Glob, path string) bool {
+ if len(patterns) == 0 {
+ patterns = protectBranch.GetUnprotectedFilePatterns()
+ if len(patterns) == 0 {
+ return false
+ }
+ }
+
+ lpath := strings.ToLower(strings.TrimSpace(path))
+
+ r := false
+ for _, pat := range patterns {
+ if pat.Match(lpath) {
+ r = true
+ break
+ }
+ }
+
+ return r
+}
+
+// GetProtectedBranchRuleByName getting protected branch rule by name
+func GetProtectedBranchRuleByName(ctx context.Context, repoID int64, ruleName string) (*ProtectedBranch, error) {
+ // branch_name is legacy name, it actually is rule name
+ rel, exist, err := db.Get[ProtectedBranch](ctx, builder.Eq{"repo_id": repoID, "branch_name": ruleName})
+ if err != nil {
+ return nil, err
+ } else if !exist {
+ return nil, nil
+ }
+ return rel, nil
+}
+
+// GetProtectedBranchRuleByID getting protected branch rule by rule ID
+func GetProtectedBranchRuleByID(ctx context.Context, repoID, ruleID int64) (*ProtectedBranch, error) {
+ rel, exist, err := db.Get[ProtectedBranch](ctx, builder.Eq{"repo_id": repoID, "id": ruleID})
+ if err != nil {
+ return nil, err
+ } else if !exist {
+ return nil, nil
+ }
+ return rel, nil
+}
+
// WhitelistOptions represent all sorts of whitelists used for protected branches
type WhitelistOptions struct {
	// Users/teams allowed to push to the branch.
	UserIDs []int64
	TeamIDs []int64

	// Users/teams allowed to merge into the branch.
	MergeUserIDs []int64
	MergeTeamIDs []int64

	// Users/teams whose reviews count towards required approvals.
	ApprovalsUserIDs []int64
	ApprovalsTeamIDs []int64
}
+
+// UpdateProtectBranch saves branch protection options of repository.
+// If ID is 0, it creates a new record. Otherwise, updates existing record.
+// This function also performs check if whitelist user and team's IDs have been changed
+// to avoid unnecessary whitelist delete and regenerate.
+func UpdateProtectBranch(ctx context.Context, repo *repo_model.Repository, protectBranch *ProtectedBranch, opts WhitelistOptions) (err error) {
+ err = repo.MustNotBeArchived()
+ if err != nil {
+ return err
+ }
+
+ if err = repo.LoadOwner(ctx); err != nil {
+ return fmt.Errorf("LoadOwner: %v", err)
+ }
+
+ whitelist, err := updateUserWhitelist(ctx, repo, protectBranch.WhitelistUserIDs, opts.UserIDs)
+ if err != nil {
+ return err
+ }
+ protectBranch.WhitelistUserIDs = whitelist
+
+ whitelist, err = updateUserWhitelist(ctx, repo, protectBranch.MergeWhitelistUserIDs, opts.MergeUserIDs)
+ if err != nil {
+ return err
+ }
+ protectBranch.MergeWhitelistUserIDs = whitelist
+
+ whitelist, err = updateApprovalWhitelist(ctx, repo, protectBranch.ApprovalsWhitelistUserIDs, opts.ApprovalsUserIDs)
+ if err != nil {
+ return err
+ }
+ protectBranch.ApprovalsWhitelistUserIDs = whitelist
+
+ // if the repo is in an organization
+ whitelist, err = updateTeamWhitelist(ctx, repo, protectBranch.WhitelistTeamIDs, opts.TeamIDs)
+ if err != nil {
+ return err
+ }
+ protectBranch.WhitelistTeamIDs = whitelist
+
+ whitelist, err = updateTeamWhitelist(ctx, repo, protectBranch.MergeWhitelistTeamIDs, opts.MergeTeamIDs)
+ if err != nil {
+ return err
+ }
+ protectBranch.MergeWhitelistTeamIDs = whitelist
+
+ whitelist, err = updateTeamWhitelist(ctx, repo, protectBranch.ApprovalsWhitelistTeamIDs, opts.ApprovalsTeamIDs)
+ if err != nil {
+ return err
+ }
+ protectBranch.ApprovalsWhitelistTeamIDs = whitelist
+
+ // Make sure protectBranch.ID is not 0 for whitelists
+ if protectBranch.ID == 0 {
+ if _, err = db.GetEngine(ctx).Insert(protectBranch); err != nil {
+ return fmt.Errorf("Insert: %v", err)
+ }
+ return nil
+ }
+
+ if _, err = db.GetEngine(ctx).ID(protectBranch.ID).AllCols().Update(protectBranch); err != nil {
+ return fmt.Errorf("Update: %v", err)
+ }
+
+ return nil
+}
+
+// updateApprovalWhitelist checks whether the user whitelist changed and returns a whitelist with
+// the users from newWhitelist which have explicit read or write access to the repo.
+func updateApprovalWhitelist(ctx context.Context, repo *repo_model.Repository, currentWhitelist, newWhitelist []int64) (whitelist []int64, err error) {
+ hasUsersChanged := !util.SliceSortedEqual(currentWhitelist, newWhitelist)
+ if !hasUsersChanged {
+ return currentWhitelist, nil
+ }
+
+ whitelist = make([]int64, 0, len(newWhitelist))
+ for _, userID := range newWhitelist {
+ if reader, err := access_model.IsRepoReader(ctx, repo, userID); err != nil {
+ return nil, err
+ } else if !reader {
+ continue
+ }
+ whitelist = append(whitelist, userID)
+ }
+
+ return whitelist, err
+}
+
+// updateUserWhitelist checks whether the user whitelist changed and returns a whitelist with
+// the users from newWhitelist which have write access to the repo.
+func updateUserWhitelist(ctx context.Context, repo *repo_model.Repository, currentWhitelist, newWhitelist []int64) (whitelist []int64, err error) {
+	// Identical ID sets mean no permission checks are needed.
+	if util.SliceSortedEqual(currentWhitelist, newWhitelist) {
+		return currentWhitelist, nil
+	}
+
+	whitelist = make([]int64, 0, len(newWhitelist))
+	for _, userID := range newWhitelist {
+		user, err := user_model.GetUserByID(ctx, userID)
+		if err != nil {
+			return nil, fmt.Errorf("GetUserByID [user_id: %d, repo_id: %d]: %v", userID, repo.ID, err)
+		}
+		perm, err := access_model.GetUserRepoPermission(ctx, repo, user)
+		if err != nil {
+			return nil, fmt.Errorf("GetUserRepoPermission [user_id: %d, repo_id: %d]: %v", userID, repo.ID, err)
+		}
+		// Silently drop IDs without code write access.
+		if perm.CanWrite(unit.TypeCode) {
+			whitelist = append(whitelist, userID)
+		}
+	}
+
+	return whitelist, nil
+}
+
+// updateTeamWhitelist checks whether the team whitelist changed and returns a whitelist with
+// the teams from newWhitelist which have at least read access to the repo.
+func updateTeamWhitelist(ctx context.Context, repo *repo_model.Repository, currentWhitelist, newWhitelist []int64) (whitelist []int64, err error) {
+	// Identical ID sets mean no lookups are needed.
+	if util.SliceSortedEqual(currentWhitelist, newWhitelist) {
+		return currentWhitelist, nil
+	}
+
+	teams, err := organization.GetTeamsWithAccessToRepo(ctx, repo.OwnerID, repo.ID, perm.AccessModeRead)
+	if err != nil {
+		return nil, fmt.Errorf("GetTeamsWithAccessToRepo [org_id: %d, repo_id: %d]: %v", repo.OwnerID, repo.ID, err)
+	}
+
+	// Intersect the requested IDs with the teams that really have access.
+	whitelist = make([]int64, 0, len(teams))
+	for _, team := range teams {
+		if slices.Contains(newWhitelist, team.ID) {
+			whitelist = append(whitelist, team.ID)
+		}
+	}
+
+	return whitelist, nil
+}
+
+// DeleteProtectedBranch removes ProtectedBranch relation between the user and repository.
+func DeleteProtectedBranch(ctx context.Context, repo *repo_model.Repository, id int64) (err error) {
+	// Archived repositories are read-only; refuse to change their rules.
+	if err = repo.MustNotBeArchived(); err != nil {
+		return err
+	}
+
+	affected, err := db.GetEngine(ctx).Delete(&ProtectedBranch{
+		RepoID: repo.ID,
+		ID:     id,
+	})
+	if err != nil {
+		return err
+	}
+	// Exactly one row must have been removed, otherwise the rule did not exist.
+	if affected != 1 {
+		return fmt.Errorf("delete protected branch ID(%v) failed", id)
+	}
+	return nil
+}
+
+// RemoveUserIDFromProtectedBranch remove all user ids from protected branch options
+func RemoveUserIDFromProtectedBranch(ctx context.Context, p *ProtectedBranch, userID int64) error {
+	// Removal only ever shrinks the lists, so comparing total lengths
+	// detects whether the user appeared in any of them.
+	before := len(p.WhitelistUserIDs) + len(p.ApprovalsWhitelistUserIDs) + len(p.MergeWhitelistUserIDs)
+	p.WhitelistUserIDs = util.SliceRemoveAll(p.WhitelistUserIDs, userID)
+	p.ApprovalsWhitelistUserIDs = util.SliceRemoveAll(p.ApprovalsWhitelistUserIDs, userID)
+	p.MergeWhitelistUserIDs = util.SliceRemoveAll(p.MergeWhitelistUserIDs, userID)
+	after := len(p.WhitelistUserIDs) + len(p.ApprovalsWhitelistUserIDs) + len(p.MergeWhitelistUserIDs)
+
+	// Skip the DB write when nothing was removed.
+	if after == before {
+		return nil
+	}
+
+	if _, err := db.GetEngine(ctx).ID(p.ID).Cols(
+		"whitelist_user_i_ds",
+		"merge_whitelist_user_i_ds",
+		"approvals_whitelist_user_i_ds",
+	).Update(p); err != nil {
+		return fmt.Errorf("updateProtectedBranches: %v", err)
+	}
+	return nil
+}
+
+// RemoveTeamIDFromProtectedBranch remove all team ids from protected branch options
+func RemoveTeamIDFromProtectedBranch(ctx context.Context, p *ProtectedBranch, teamID int64) error {
+	// Removal only ever shrinks the lists, so comparing total lengths
+	// detects whether the team appeared in any of them.
+	before := len(p.WhitelistTeamIDs) + len(p.ApprovalsWhitelistTeamIDs) + len(p.MergeWhitelistTeamIDs)
+	p.WhitelistTeamIDs = util.SliceRemoveAll(p.WhitelistTeamIDs, teamID)
+	p.ApprovalsWhitelistTeamIDs = util.SliceRemoveAll(p.ApprovalsWhitelistTeamIDs, teamID)
+	p.MergeWhitelistTeamIDs = util.SliceRemoveAll(p.MergeWhitelistTeamIDs, teamID)
+	after := len(p.WhitelistTeamIDs) + len(p.ApprovalsWhitelistTeamIDs) + len(p.MergeWhitelistTeamIDs)
+
+	// Skip the DB write when nothing was removed.
+	if after == before {
+		return nil
+	}
+
+	if _, err := db.GetEngine(ctx).ID(p.ID).Cols(
+		"whitelist_team_i_ds",
+		"merge_whitelist_team_i_ds",
+		"approvals_whitelist_team_i_ds",
+	).Update(p); err != nil {
+		return fmt.Errorf("updateProtectedBranches: %v", err)
+	}
+	return nil
+}
diff --git a/models/git/protected_branch_list.go b/models/git/protected_branch_list.go
new file mode 100644
index 00000000..613333a5
--- /dev/null
+++ b/models/git/protected_branch_list.go
@@ -0,0 +1,95 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git
+
+import (
+ "context"
+ "sort"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/modules/optional"
+
+ "github.com/gobwas/glob"
+)
+
+// ProtectedBranchRules is an ordered collection of protection rules for one repository.
+type ProtectedBranchRules []*ProtectedBranch
+
+// GetFirstMatched returns the first rule matching branchName, or nil when none matches.
+func (rules ProtectedBranchRules) GetFirstMatched(branchName string) *ProtectedBranch {
+	for _, r := range rules {
+		if r.Match(branchName) {
+			return r
+		}
+	}
+	return nil
+}
+
+// sort orders the rules in place: plain (non-glob) names first, then by creation time.
+func (rules ProtectedBranchRules) sort() {
+	sort.Slice(rules, func(i, j int) bool {
+		a, b := rules[i], rules[j]
+		a.loadGlob()
+		b.loadGlob()
+		if a.isPlainName == b.isPlainName {
+			return a.CreatedUnix < b.CreatedUnix
+		}
+		return a.isPlainName // plain names sort ahead of glob patterns
+	})
+}
+
+// FindRepoProtectedBranchRules load all repository's protected rules
+func FindRepoProtectedBranchRules(ctx context.Context, repoID int64) (ProtectedBranchRules, error) {
+	var rules ProtectedBranchRules
+	if err := db.GetEngine(ctx).Where("repo_id = ?", repoID).Asc("created_unix").Find(&rules); err != nil {
+		return nil, err
+	}
+	// Give non-glob rules higher priority; equal kinds keep first-created-first order.
+	rules.sort()
+	return rules, nil
+}
+
+// FindAllMatchedBranches find all matched branches
+func FindAllMatchedBranches(ctx context.Context, repoID int64, ruleName string) ([]string, error) {
+ results := make([]string, 0, 10)
+ for page := 1; ; page++ {
+ brancheNames, err := FindBranchNames(ctx, FindBranchOptions{
+ ListOptions: db.ListOptions{
+ PageSize: 100,
+ Page: page,
+ },
+ RepoID: repoID,
+ IsDeletedBranch: optional.Some(false),
+ })
+ if err != nil {
+ return nil, err
+ }
+ rule := glob.MustCompile(ruleName)
+
+ for _, branch := range brancheNames {
+ if rule.Match(branch) {
+ results = append(results, branch)
+ }
+ }
+ if len(brancheNames) < 100 {
+ break
+ }
+ }
+
+ return results, nil
+}
+
+// GetFirstMatchProtectedBranchRule returns the first matched rules
+func GetFirstMatchProtectedBranchRule(ctx context.Context, repoID int64, branchName string) (*ProtectedBranch, error) {
+	rules, err := FindRepoProtectedBranchRules(ctx, repoID)
+	if err != nil {
+		return nil, err
+	}
+	// The rules are already priority-sorted, so the first hit wins.
+	return rules.GetFirstMatched(branchName), nil
+}
+
+// IsBranchProtected checks if branch is protected
+func IsBranchProtected(ctx context.Context, repoID int64, branchName string) (bool, error) {
+	rule, err := GetFirstMatchProtectedBranchRule(ctx, repoID, branchName)
+	if err != nil {
+		return false, err
+	}
+	// Any matching rule means the branch is protected.
+	return rule != nil, nil
+}
diff --git a/models/git/protected_branch_test.go b/models/git/protected_branch_test.go
new file mode 100644
index 00000000..1962859a
--- /dev/null
+++ b/models/git/protected_branch_test.go
@@ -0,0 +1,78 @@
+// Copyright 2022 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// TestBranchRuleMatch checks ProtectedBranch.Match against plain names,
+// single-segment globs (*) and multi-segment globs (**).
+func TestBranchRuleMatch(t *testing.T) {
+	cases := []struct {
+		Rule          string
+		BranchName    string
+		ExpectedMatch bool
+	}{
+		{Rule: "release/*", BranchName: "release/v1.17", ExpectedMatch: true},
+		{Rule: "release/**/v1.17", BranchName: "release/test/v1.17", ExpectedMatch: true},
+		{Rule: "release/**/v1.17", BranchName: "release/test/1/v1.17", ExpectedMatch: true},
+		{Rule: "release/*/v1.17", BranchName: "release/test/1/v1.17", ExpectedMatch: false},
+		{Rule: "release/v*", BranchName: "release/v1.16", ExpectedMatch: true},
+		{Rule: "*", BranchName: "release/v1.16", ExpectedMatch: false},
+		{Rule: "**", BranchName: "release/v1.16", ExpectedMatch: true},
+		{Rule: "main", BranchName: "main", ExpectedMatch: true},
+		{Rule: "master", BranchName: "main", ExpectedMatch: false},
+	}
+
+	for _, c := range cases {
+		pb := ProtectedBranch{RuleName: c.Rule}
+		var should, infact string
+		if c.ExpectedMatch {
+			infact = " not"
+		} else {
+			should = " not"
+		}
+		assert.EqualValues(t, c.ExpectedMatch, pb.Match(c.BranchName),
+			fmt.Sprintf("%s should%s match %s but it is%s", c.BranchName, should, c.Rule, infact),
+		)
+	}
+}
diff --git a/models/git/protected_tag.go b/models/git/protected_tag.go
new file mode 100644
index 00000000..9a6646c7
--- /dev/null
+++ b/models/git/protected_tag.go
@@ -0,0 +1,150 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git
+
+import (
+ "context"
+ "regexp"
+ "slices"
+ "strings"
+
+ "code.gitea.io/gitea/models/db"
+ "code.gitea.io/gitea/models/organization"
+ "code.gitea.io/gitea/modules/timeutil"
+
+ "github.com/gobwas/glob"
+)
+
+// ProtectedTag struct
+type ProtectedTag struct {
+	ID           int64 `xorm:"pk autoincr"`
+	RepoID       int64
+	NamePattern  string
+	// RegexPattern / GlobPattern are compiled lazily from NamePattern by
+	// EnsureCompiledPattern and are never persisted (xorm:"-").
+	RegexPattern *regexp.Regexp `xorm:"-"`
+	GlobPattern  glob.Glob      `xorm:"-"`
+	AllowlistUserIDs []int64 `xorm:"JSON TEXT"`
+	AllowlistTeamIDs []int64 `xorm:"JSON TEXT"`
+
+	CreatedUnix timeutil.TimeStamp `xorm:"created"`
+	UpdatedUnix timeutil.TimeStamp `xorm:"updated"`
+}
+
+func init() {
+	// Make the ProtectedTag model known to the db layer.
+	db.RegisterModel(new(ProtectedTag))
+}
+
+// EnsureCompiledPattern ensures the glob pattern is compiled
+func (pt *ProtectedTag) EnsureCompiledPattern() error {
+ if pt.RegexPattern != nil || pt.GlobPattern != nil {
+ return nil
+ }
+
+ var err error
+ if len(pt.NamePattern) >= 2 && strings.HasPrefix(pt.NamePattern, "/") && strings.HasSuffix(pt.NamePattern, "/") {
+ pt.RegexPattern, err = regexp.Compile(pt.NamePattern[1 : len(pt.NamePattern)-1])
+ } else {
+ pt.GlobPattern, err = glob.Compile(pt.NamePattern)
+ }
+ return err
+}
+
+// matchString reports whether name matches the compiled pattern,
+// preferring the regex form when both could exist.
+func (pt *ProtectedTag) matchString(name string) bool {
+	if pt.RegexPattern == nil {
+		return pt.GlobPattern.Match(name)
+	}
+	return pt.RegexPattern.MatchString(name)
+}
+
+// InsertProtectedTag inserts a protected tag to database
+func InsertProtectedTag(ctx context.Context, pt *ProtectedTag) error {
+	if _, err := db.GetEngine(ctx).Insert(pt); err != nil {
+		return err
+	}
+	return nil
+}
+
+// UpdateProtectedTag updates the protected tag
+func UpdateProtectedTag(ctx context.Context, pt *ProtectedTag) error {
+	if _, err := db.GetEngine(ctx).ID(pt.ID).AllCols().Update(pt); err != nil {
+		return err
+	}
+	return nil
+}
+
+// DeleteProtectedTag deletes a protected tag by ID
+func DeleteProtectedTag(ctx context.Context, pt *ProtectedTag) error {
+	if _, err := db.GetEngine(ctx).ID(pt.ID).Delete(&ProtectedTag{}); err != nil {
+		return err
+	}
+	return nil
+}
+
+// IsUserAllowedModifyTag returns true if the user is allowed to modify the tag
+func IsUserAllowedModifyTag(ctx context.Context, pt *ProtectedTag, userID int64) (bool, error) {
+	// A direct user-allowlist hit wins without any DB lookup.
+	if slices.Contains(pt.AllowlistUserIDs, userID) {
+		return true, nil
+	}
+
+	// Without allowlisted teams there is nothing else that can grant access.
+	if len(pt.AllowlistTeamIDs) == 0 {
+		return false, nil
+	}
+
+	// Membership in any allowlisted team grants access.
+	return organization.IsUserInTeams(ctx, userID, pt.AllowlistTeamIDs)
+}
+
+// GetProtectedTags gets all protected tags of the repository
+func GetProtectedTags(ctx context.Context, repoID int64) ([]*ProtectedTag, error) {
+	// Keep the non-nil empty slice on "no rows" like before.
+	tags := make([]*ProtectedTag, 0)
+	if err := db.GetEngine(ctx).Find(&tags, &ProtectedTag{RepoID: repoID}); err != nil {
+		return tags, err
+	}
+	return tags, nil
+}
+
+// GetProtectedTagByID gets the protected tag with the specific id
+func GetProtectedTagByID(ctx context.Context, id int64) (*ProtectedTag, error) {
+	tag := &ProtectedTag{}
+	has, err := db.GetEngine(ctx).ID(id).Get(tag)
+	switch {
+	case err != nil:
+		return nil, err
+	case !has:
+		return nil, nil // not found is not an error here
+	default:
+		return tag, nil
+	}
+}
+
+// GetProtectedTagByNamePattern gets protected tag by name_pattern
+func GetProtectedTagByNamePattern(ctx context.Context, repoID int64, pattern string) (*ProtectedTag, error) {
+	tag := &ProtectedTag{NamePattern: pattern, RepoID: repoID}
+	has, err := db.GetEngine(ctx).Get(tag)
+	switch {
+	case err != nil:
+		return nil, err
+	case !has:
+		return nil, nil // not found is not an error here
+	default:
+		return tag, nil
+	}
+}
+
+// IsUserAllowedToControlTag checks if a user can control the specific tag.
+// It returns true if the tag name is not protected or the user is allowed to control it.
+func IsUserAllowedToControlTag(ctx context.Context, tags []*ProtectedTag, tagName string, userID int64) (bool, error) {
+	// Default is allowed; the verdict of the last matching rule stands
+	// unless some earlier matching rule already allowed the user.
+	allowed := true
+	for _, tag := range tags {
+		if err := tag.EnsureCompiledPattern(); err != nil {
+			return false, err
+		}
+
+		// Rules whose pattern does not cover this tag are irrelevant.
+		if !tag.matchString(tagName) {
+			continue
+		}
+
+		ok, err := IsUserAllowedModifyTag(ctx, tag, userID)
+		if err != nil {
+			return false, err
+		}
+		allowed = ok
+		if ok {
+			break
+		}
+	}
+
+	return allowed, nil
+}
diff --git a/models/git/protected_tag_test.go b/models/git/protected_tag_test.go
new file mode 100644
index 00000000..796e1594
--- /dev/null
+++ b/models/git/protected_tag_test.go
@@ -0,0 +1,166 @@
+// Copyright 2021 The Gitea Authors. All rights reserved.
+// SPDX-License-Identifier: MIT
+
+package git_test
+
+import (
+ "testing"
+
+ "code.gitea.io/gitea/models/db"
+ git_model "code.gitea.io/gitea/models/git"
+ "code.gitea.io/gitea/models/unittest"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+// TestIsUserAllowed covers the user-allowlist, team-allowlist and combined
+// configurations of IsUserAllowedModifyTag against the test fixtures.
+func TestIsUserAllowed(t *testing.T) {
+	require.NoError(t, unittest.PrepareTestDatabase())
+
+	cases := []struct {
+		tag     *git_model.ProtectedTag
+		userID  int64
+		allowed bool
+	}{
+		{&git_model.ProtectedTag{}, 1, false},
+		{&git_model.ProtectedTag{AllowlistUserIDs: []int64{1}}, 1, true},
+		{&git_model.ProtectedTag{AllowlistUserIDs: []int64{1}}, 2, false},
+		{&git_model.ProtectedTag{AllowlistTeamIDs: []int64{1}}, 1, false},
+		{&git_model.ProtectedTag{AllowlistTeamIDs: []int64{1}}, 2, true},
+		{&git_model.ProtectedTag{AllowlistUserIDs: []int64{1}, AllowlistTeamIDs: []int64{1}}, 1, true},
+		{&git_model.ProtectedTag{AllowlistUserIDs: []int64{1}, AllowlistTeamIDs: []int64{1}}, 2, true},
+	}
+
+	for i, c := range cases {
+		allowed, err := git_model.IsUserAllowedModifyTag(db.DefaultContext, c.tag, c.userID)
+		require.NoError(t, err)
+		assert.Equal(t, c.allowed, allowed, "case %d", i)
+	}
+}
+
+// TestIsUserAllowedToControlTag runs the same tag-name/user cases against a
+// glob-based and a regex-based rule set that are meant to be equivalent.
+func TestIsUserAllowedToControlTag(t *testing.T) {
+	cases := []struct {
+		name    string
+		userid  int64
+		allowed bool
+	}{
+		{name: "test", userid: 1, allowed: true},
+		{name: "test", userid: 3, allowed: true},
+		{name: "gitea", userid: 1, allowed: true},
+		{name: "gitea", userid: 3, allowed: false},
+		{name: "test-gitea", userid: 1, allowed: true},
+		{name: "test-gitea", userid: 3, allowed: false},
+		{name: "gitea-test", userid: 1, allowed: true},
+		{name: "gitea-test", userid: 3, allowed: true},
+		{name: "v-1", userid: 1, allowed: false},
+		{name: "v-1", userid: 2, allowed: true},
+		{name: "release", userid: 1, allowed: false},
+	}
+
+	// runCases checks every case against the given rule set.
+	runCases := func(t *testing.T, protectedTags []*git_model.ProtectedTag) {
+		for n, c := range cases {
+			isAllowed, err := git_model.IsUserAllowedToControlTag(db.DefaultContext, protectedTags, c.name, c.userid)
+			require.NoError(t, err)
+			assert.Equal(t, c.allowed, isAllowed, "case %d: error should match", n)
+		}
+	}
+
+	t.Run("Glob", func(t *testing.T) {
+		runCases(t, []*git_model.ProtectedTag{
+			{NamePattern: `*gitea`, AllowlistUserIDs: []int64{1}},
+			{NamePattern: `v-*`, AllowlistUserIDs: []int64{2}},
+			{NamePattern: "release"},
+		})
+	})
+
+	t.Run("Regex", func(t *testing.T) {
+		runCases(t, []*git_model.ProtectedTag{
+			{NamePattern: `/gitea\z/`, AllowlistUserIDs: []int64{1}},
+			{NamePattern: `/\Av-/`, AllowlistUserIDs: []int64{2}},
+			{NamePattern: "/release/"},
+		})
+	})
+}