2014-03-22 23:20:50 +05:30
// Copyright 2014 The Gogs Authors. All rights reserved.
2018-11-28 16:56:14 +05:30
// Copyright 2018 The Gitea Authors. All rights reserved.
2022-11-27 23:50:29 +05:30
// SPDX-License-Identifier: MIT
2014-03-22 23:20:50 +05:30
package repo
2014-07-26 11:58:04 +05:30
import (
2017-01-25 08:13:02 +05:30
"bytes"
2022-04-28 17:18:48 +05:30
stdCtx "context"
2014-07-26 11:58:04 +05:30
"errors"
"fmt"
2024-03-02 20:35:07 +05:30
"html/template"
2022-01-21 23:29:26 +05:30
"math/big"
2018-07-18 02:53:58 +05:30
"net/http"
2021-11-16 23:48:25 +05:30
"net/url"
2023-09-07 15:07:47 +05:30
"slices"
2022-09-02 13:28:49 +05:30
"sort"
2017-03-15 06:40:35 +05:30
"strconv"
2014-07-26 11:58:04 +05:30
"strings"
2022-04-08 00:29:56 +05:30
"time"
2014-07-26 11:58:04 +05:30
2022-08-25 08:01:57 +05:30
activities_model "code.gitea.io/gitea/models/activities"
2021-09-24 17:02:56 +05:30
"code.gitea.io/gitea/models/db"
2022-06-12 21:21:54 +05:30
git_model "code.gitea.io/gitea/models/git"
2022-03-31 14:50:39 +05:30
issues_model "code.gitea.io/gitea/models/issues"
2022-03-29 11:59:02 +05:30
"code.gitea.io/gitea/models/organization"
2022-05-11 15:39:36 +05:30
access_model "code.gitea.io/gitea/models/perm/access"
2022-03-29 19:46:31 +05:30
project_model "code.gitea.io/gitea/models/project"
2022-05-07 22:35:52 +05:30
pull_model "code.gitea.io/gitea/models/pull"
2021-11-19 19:09:57 +05:30
repo_model "code.gitea.io/gitea/models/repo"
2021-11-10 01:27:58 +05:30
"code.gitea.io/gitea/models/unit"
2021-11-24 15:19:20 +05:30
user_model "code.gitea.io/gitea/models/user"
2016-11-10 21:54:48 +05:30
"code.gitea.io/gitea/modules/base"
2022-10-12 10:48:26 +05:30
"code.gitea.io/gitea/modules/container"
2024-02-25 04:04:51 +05:30
"code.gitea.io/gitea/modules/emoji"
2019-03-27 15:03:00 +05:30
"code.gitea.io/gitea/modules/git"
2019-02-21 06:24:05 +05:30
issue_indexer "code.gitea.io/gitea/modules/indexer/issues"
2022-09-02 13:28:49 +05:30
issue_template "code.gitea.io/gitea/modules/issue/template"
2016-11-10 21:54:48 +05:30
"code.gitea.io/gitea/modules/log"
2019-12-07 09:51:18 +05:30
"code.gitea.io/gitea/modules/markup"
2017-09-21 10:50:14 +05:30
"code.gitea.io/gitea/modules/markup/markdown"
2024-03-01 00:22:49 +05:30
"code.gitea.io/gitea/modules/optional"
2023-05-08 12:09:32 +05:30
repo_module "code.gitea.io/gitea/modules/repository"
2016-11-10 21:54:48 +05:30
"code.gitea.io/gitea/modules/setting"
2019-06-06 06:07:45 +05:30
api "code.gitea.io/gitea/modules/structs"
2024-03-01 12:41:51 +05:30
"code.gitea.io/gitea/modules/templates"
2022-04-01 14:17:50 +05:30
"code.gitea.io/gitea/modules/templates/vars"
2022-04-08 00:29:56 +05:30
"code.gitea.io/gitea/modules/timeutil"
2017-01-25 08:13:02 +05:30
"code.gitea.io/gitea/modules/util"
2021-01-26 21:06:53 +05:30
"code.gitea.io/gitea/modules/web"
2022-09-02 13:28:49 +05:30
"code.gitea.io/gitea/routers/utils"
2021-12-10 13:44:24 +05:30
asymkey_service "code.gitea.io/gitea/services/asymkey"
2024-02-27 12:42:22 +05:30
"code.gitea.io/gitea/services/context"
"code.gitea.io/gitea/services/context/upload"
2022-12-29 08:27:15 +05:30
"code.gitea.io/gitea/services/convert"
2021-04-07 01:14:05 +05:30
"code.gitea.io/gitea/services/forms"
2019-09-30 19:20:44 +05:30
issue_service "code.gitea.io/gitea/services/issue"
2019-12-07 08:14:10 +05:30
pull_service "code.gitea.io/gitea/services/pull"
2022-06-06 13:31:49 +05:30
repo_service "code.gitea.io/gitea/services/repository"
2014-07-26 11:58:04 +05:30
)
const (
	// tplAttachment renders the attachment list shown under issue/comment content.
	tplAttachment base.TplName = "repo/issue/view_content/attachments"

	// Templates for the main issue pages.
	tplIssues      base.TplName = "repo/issue/list"   // issue list
	tplIssueNew    base.TplName = "repo/issue/new"    // new-issue form
	tplIssueChoose base.TplName = "repo/issue/choose" // issue template chooser
	tplIssueView   base.TplName = "repo/issue/view"   // single issue view

	// tplReactions renders the emoji reaction bar on issue content.
	tplReactions base.TplName = "repo/issue/view_content/reactions"

	// Context-data keys used to pass a repository's issue template
	// (and its title) to the templates above.
	issueTemplateKey      = "IssueTemplate"
	issueTemplateTitleKey = "IssueTemplateTitle"
)
2022-01-20 23:16:10 +05:30
// IssueTemplateCandidates lists the file paths probed, in order, when looking
// for a repository's single-file issue template. Both Markdown (.md) and YAML
// (.yaml/.yml) forms are accepted, in the repository root as well as the
// .forgejo, .gitea and .github directories.
var IssueTemplateCandidates = []string{
	"ISSUE_TEMPLATE.md",
	"ISSUE_TEMPLATE.yaml",
	"ISSUE_TEMPLATE.yml",
	"issue_template.md",
	"issue_template.yaml",
	"issue_template.yml",
	".forgejo/ISSUE_TEMPLATE.md",
	".forgejo/ISSUE_TEMPLATE.yaml",
	".forgejo/ISSUE_TEMPLATE.yml",
	".forgejo/issue_template.md",
	".forgejo/issue_template.yaml",
	".forgejo/issue_template.yml",
	".gitea/ISSUE_TEMPLATE.md",
	".gitea/ISSUE_TEMPLATE.yaml",
	".gitea/ISSUE_TEMPLATE.yml",
	".gitea/issue_template.md",
	".gitea/issue_template.yaml",
	".gitea/issue_template.yml",
	".github/ISSUE_TEMPLATE.md",
	".github/ISSUE_TEMPLATE.yaml",
	".github/ISSUE_TEMPLATE.yml",
	".github/issue_template.md",
	".github/issue_template.yaml",
	".github/issue_template.yml",
}
2014-07-26 11:58:04 +05:30
2019-02-19 02:25:04 +05:30
// MustAllowUserComment checks to make sure if an issue is locked.
// If locked and user has permissions to write to the repository,
// then the comment is allowed, else it is blocked
func MustAllowUserComment ( ctx * context . Context ) {
issue := GetActionIssue ( ctx )
if ctx . Written ( ) {
return
}
2022-03-22 12:33:22 +05:30
if issue . IsLocked && ! ctx . Repo . CanWriteIssuesOrPulls ( issue . IsPull ) && ! ctx . Doer . IsAdmin {
2019-02-19 02:25:04 +05:30
ctx . Flash . Error ( ctx . Tr ( "repo.issues.comment_on_locked" ) )
2023-02-11 12:04:11 +05:30
ctx . Redirect ( issue . Link ( ) )
2019-02-19 02:25:04 +05:30
return
}
}
2016-11-24 12:34:31 +05:30
// MustEnableIssues check if repository enable internal issues
2016-03-11 22:26:52 +05:30
func MustEnableIssues ( ctx * context . Context ) {
2021-11-10 01:27:58 +05:30
if ! ctx . Repo . CanRead ( unit . TypeIssues ) &&
! ctx . Repo . CanRead ( unit . TypeExternalTracker ) {
2018-01-11 03:04:17 +05:30
ctx . NotFound ( "MustEnableIssues" , nil )
2016-03-07 10:27:46 +05:30
return
2015-12-05 08:00:33 +05:30
}
2016-11-04 13:36:54 +05:30
2022-12-10 08:16:31 +05:30
unit , err := ctx . Repo . Repository . GetUnit ( ctx , unit . TypeExternalTracker )
2017-02-04 21:23:46 +05:30
if err == nil {
ctx . Redirect ( unit . ExternalTrackerConfig ( ) . ExternalTrackerURL )
2016-11-04 13:36:54 +05:30
return
}
2015-12-05 08:00:33 +05:30
}
2018-11-28 16:56:14 +05:30
// MustAllowPulls check if repository enable pull requests and user have right to do that
2016-03-11 22:26:52 +05:30
func MustAllowPulls ( ctx * context . Context ) {
2021-11-10 01:27:58 +05:30
if ! ctx . Repo . Repository . CanEnablePulls ( ) || ! ctx . Repo . CanRead ( unit . TypePullRequests ) {
2018-01-11 03:04:17 +05:30
ctx . NotFound ( "MustAllowPulls" , nil )
2016-03-07 10:27:46 +05:30
return
2015-12-05 08:00:33 +05:30
}
2015-12-20 08:37:06 +05:30
2016-03-07 10:27:46 +05:30
// User can send pull request if owns a forked repository.
2023-09-14 22:39:32 +05:30
if ctx . IsSigned && repo_model . HasForkedRepo ( ctx , ctx . Doer . ID , ctx . Repo . Repository . ID ) {
2016-03-07 10:27:46 +05:30
ctx . Repo . PullRequest . Allowed = true
2022-03-22 12:33:22 +05:30
ctx . Repo . PullRequest . HeadInfoSubURL = url . PathEscape ( ctx . Doer . Name ) + ":" + util . PathEscapeSegments ( ctx . Repo . BranchName )
2016-03-07 10:27:46 +05:30
}
2015-12-05 08:00:33 +05:30
}
2024-03-02 21:12:31 +05:30
func issues ( ctx * context . Context , milestoneID , projectID int64 , isPullOption optional . Option [ bool ] ) {
2018-11-29 07:16:30 +05:30
var err error
2021-08-11 06:01:13 +05:30
viewType := ctx . FormString ( "type" )
sortType := ctx . FormString ( "sort" )
2023-02-25 08:25:50 +05:30
types := [ ] string { "all" , "your_repositories" , "assigned" , "created_by" , "mentioned" , "review_requested" , "reviewed_by" }
Improve utils of slices (#22379)
- Move the file `compare.go` and `slice.go` to `slice.go`.
- Fix `ExistsInSlice`, it's buggy
- It uses `sort.Search`, so it assumes that the input slice is sorted.
- It passes `func(i int) bool { return slice[i] == target })` to
`sort.Search`, that's incorrect, check the doc of `sort.Search`.
- Conbine `IsInt64InSlice(int64, []int64)` and `ExistsInSlice(string,
[]string)` to `SliceContains[T]([]T, T)`.
- Conbine `IsSliceInt64Eq([]int64, []int64)` and `IsEqualSlice([]string,
[]string)` to `SliceSortedEqual[T]([]T, T)`.
- Add `SliceEqual[T]([]T, T)` as a distinction from
`SliceSortedEqual[T]([]T, T)`.
- Redesign `RemoveIDFromList([]int64, int64) ([]int64, bool)` to
`SliceRemoveAll[T]([]T, T) []T`.
- Add `SliceContainsFunc[T]([]T, func(T) bool)` and
`SliceRemoveAllFunc[T]([]T, func(T) bool)` for general use.
- Add comments to explain why not `golang.org/x/exp/slices`.
- Add unit tests.
2023-01-11 11:01:16 +05:30
if ! util . SliceContainsString ( types , viewType , true ) {
2014-07-26 11:58:04 +05:30
viewType = "all"
}
2015-08-15 09:37:08 +05:30
var (
2021-07-29 07:12:15 +05:30
assigneeID = ctx . FormInt64 ( "assignee" )
2022-08-09 01:33:58 +05:30
posterID = ctx . FormInt64 ( "poster" )
2021-01-17 22:04:19 +05:30
mentionedID int64
reviewRequestedID int64
2023-02-25 08:25:50 +05:30
reviewedID int64
2015-08-15 09:37:08 +05:30
)
2014-07-26 11:58:04 +05:30
2017-06-15 08:39:03 +05:30
if ctx . IsSigned {
switch viewType {
case "created_by" :
2022-03-22 12:33:22 +05:30
posterID = ctx . Doer . ID
2017-06-15 08:39:03 +05:30
case "mentioned" :
2022-03-22 12:33:22 +05:30
mentionedID = ctx . Doer . ID
2020-11-20 03:09:55 +05:30
case "assigned" :
2022-03-22 12:33:22 +05:30
assigneeID = ctx . Doer . ID
2021-01-17 22:04:19 +05:30
case "review_requested" :
2022-03-22 12:33:22 +05:30
reviewRequestedID = ctx . Doer . ID
2023-02-25 08:25:50 +05:30
case "reviewed_by" :
reviewedID = ctx . Doer . ID
2017-06-15 08:39:03 +05:30
}
}
2015-07-25 00:22:25 +05:30
repo := ctx . Repo . Repository
2019-01-23 09:40:38 +05:30
var labelIDs [ ] int64
2023-05-17 14:51:35 +05:30
// 1,-2 means including label 1 and excluding label 2
// 0 means issues with no label
// blank means labels will not be filtered for issues
2021-08-11 06:01:13 +05:30
selectLabels := ctx . FormString ( "labels" )
2023-07-26 18:30:50 +05:30
if selectLabels == "" {
ctx . Data [ "AllLabels" ] = true
} else if selectLabels == "0" {
ctx . Data [ "NoLabel" ] = true
2023-08-17 18:49:24 +05:30
}
if len ( selectLabels ) > 0 {
2019-01-23 09:40:38 +05:30
labelIDs , err = base . StringsToInt64s ( strings . Split ( selectLabels , "," ) )
if err != nil {
2024-03-21 20:37:35 +05:30
ctx . Flash . Error ( ctx . Tr ( "invalid_data" , selectLabels ) , true )
2019-01-23 09:40:38 +05:30
}
}
2016-12-24 16:03:21 +05:30
2021-08-11 06:01:13 +05:30
keyword := strings . Trim ( ctx . FormString ( "q" ) , " " )
2017-01-25 08:13:02 +05:30
if bytes . Contains ( [ ] byte ( keyword ) , [ ] byte { 0x00 } ) {
keyword = ""
}
2024-06-18 00:28:24 +05:30
isFuzzy := ctx . FormBool ( "fuzzy" )
2023-06-08 13:38:35 +05:30
var mileIDs [ ] int64
if milestoneID > 0 || milestoneID == db . NoConditionID { // -1 to get those issues which have no any milestone assigned
mileIDs = [ ] int64 { milestoneID }
}
2022-06-13 15:07:59 +05:30
var issueStats * issues_model . IssueStats
2023-10-19 19:38:31 +05:30
statsOpts := & issues_model . IssuesOptions {
RepoIDs : [ ] int64 { repo . ID } ,
LabelIDs : labelIDs ,
MilestoneIDs : mileIDs ,
ProjectID : projectID ,
AssigneeID : assigneeID ,
MentionedID : mentionedID ,
PosterID : posterID ,
ReviewRequestedID : reviewRequestedID ,
ReviewedID : reviewedID ,
IsPull : isPullOption ,
IssueIDs : nil ,
}
if keyword != "" {
2024-06-18 00:28:24 +05:30
allIssueIDs , err := issueIDsFromSearch ( ctx , keyword , isFuzzy , statsOpts )
2023-10-19 19:38:31 +05:30
if err != nil {
if issue_indexer . IsAvailable ( ctx ) {
ctx . ServerError ( "issueIDsFromSearch" , err )
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
return
}
2023-10-19 19:38:31 +05:30
ctx . Data [ "IssueIndexerUnavailable" ] = true
return
2017-01-25 08:13:02 +05:30
}
2023-10-19 19:38:31 +05:30
statsOpts . IssueIDs = allIssueIDs
}
if keyword != "" && len ( statsOpts . IssueIDs ) == 0 {
// So it did search with the keyword, but no issue found.
// Just set issueStats to empty.
issueStats = & issues_model . IssueStats { }
} else {
// So it did search with the keyword, and found some issues. It needs to get issueStats of these issues.
// Or the keyword is empty, so it doesn't need issueIDs as filter, just get issueStats with statsOpts.
issueStats , err = issues_model . GetIssueStats ( ctx , statsOpts )
if err != nil {
ctx . ServerError ( "GetIssueStats" , err )
return
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
}
2016-12-24 16:03:21 +05:30
}
2020-07-10 02:43:06 +05:30
2024-03-02 21:12:31 +05:30
var isShowClosed optional . Option [ bool ]
2024-01-15 20:37:22 +05:30
switch ctx . FormString ( "state" ) {
case "closed" :
2024-03-02 21:12:31 +05:30
isShowClosed = optional . Some ( true )
2024-01-15 20:37:22 +05:30
case "all" :
2024-03-02 21:12:31 +05:30
isShowClosed = optional . None [ bool ] ( )
2024-01-15 20:37:22 +05:30
default :
2024-03-02 21:12:31 +05:30
isShowClosed = optional . Some ( false )
2024-01-15 20:37:22 +05:30
}
// if there are closed issues and no open issues, default to showing all issues
2021-08-11 06:01:13 +05:30
if len ( ctx . FormString ( "state" ) ) == 0 && issueStats . OpenCount == 0 && issueStats . ClosedCount != 0 {
2024-03-02 21:12:31 +05:30
isShowClosed = optional . None [ bool ] ( )
2020-07-10 02:43:06 +05:30
}
2023-10-19 19:38:31 +05:30
if repo . IsTimetrackerEnabled ( ctx ) {
totalTrackedTime , err := issues_model . GetIssueTotalTrackedTime ( ctx , statsOpts , isShowClosed )
if err != nil {
ctx . ServerError ( "GetIssueTotalTrackedTime" , err )
return
}
ctx . Data [ "TotalTrackedTime" ] = totalTrackedTime
}
2023-10-18 05:33:42 +05:30
archived := ctx . FormBool ( "archived" )
2021-07-29 07:12:15 +05:30
page := ctx . FormInt ( "page" )
2015-07-24 14:12:47 +05:30
if page <= 1 {
page = 1
}
2015-07-28 00:44:37 +05:30
var total int
2024-03-02 21:12:31 +05:30
switch {
case isShowClosed . Value ( ) :
2015-07-28 00:44:37 +05:30
total = int ( issueStats . ClosedCount )
2024-03-02 21:12:31 +05:30
case ! isShowClosed . Has ( ) :
2024-01-15 20:37:22 +05:30
total = int ( issueStats . OpenCount + issueStats . ClosedCount )
default :
total = int ( issueStats . OpenCount )
2015-07-24 14:12:47 +05:30
}
2019-04-20 09:45:19 +05:30
pager := context . NewPagination ( total , setting . UI . IssuePagingNum , page , 5 )
2014-07-26 11:58:04 +05:30
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
var issues issues_model . IssueList
{
2024-06-18 00:28:24 +05:30
ids , err := issueIDsFromSearch ( ctx , keyword , isFuzzy , & issues_model . IssuesOptions {
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
Paginator : & db . ListOptions {
2020-01-25 00:30:29 +05:30
Page : pager . Paginater . Current ( ) ,
PageSize : setting . UI . IssuePagingNum ,
} ,
2023-05-19 19:47:48 +05:30
RepoIDs : [ ] int64 { repo . ID } ,
2021-01-17 22:04:19 +05:30
AssigneeID : assigneeID ,
PosterID : posterID ,
MentionedID : mentionedID ,
ReviewRequestedID : reviewRequestedID ,
2023-02-25 08:25:50 +05:30
ReviewedID : reviewedID ,
2021-01-17 22:04:19 +05:30
MilestoneIDs : mileIDs ,
ProjectID : projectID ,
2024-01-15 20:37:22 +05:30
IsClosed : isShowClosed ,
2021-01-17 22:04:19 +05:30
IsPull : isPullOption ,
LabelIDs : labelIDs ,
SortType : sortType ,
2016-12-24 16:03:21 +05:30
} )
if err != nil {
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
if issue_indexer . IsAvailable ( ctx ) {
ctx . ServerError ( "issueIDsFromSearch" , err )
return
}
ctx . Data [ "IssueIndexerUnavailable" ] = true
return
}
issues , err = issues_model . GetIssuesByIDs ( ctx , ids , true )
if err != nil {
ctx . ServerError ( "GetIssuesByIDs" , err )
2016-12-24 16:03:21 +05:30
return
}
2014-07-26 11:58:04 +05:30
}
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
approvalCounts , err := issues . GetApprovalCounts ( ctx )
2020-03-06 09:14:06 +05:30
if err != nil {
ctx . ServerError ( "ApprovalCounts" , err )
return
}
2024-03-12 12:53:44 +05:30
if ctx . IsSigned {
if err := issues . LoadIsRead ( ctx , ctx . Doer . ID ) ; err != nil {
ctx . ServerError ( "LoadIsRead" , err )
2017-02-03 12:52:39 +05:30
return
2014-07-26 11:58:04 +05:30
}
2024-03-12 12:53:44 +05:30
} else {
for i := range issues {
issues [ i ] . IsRead = true
}
2021-04-15 23:04:43 +05:30
}
2019-04-03 01:24:29 +05:30
2022-04-27 04:10:01 +05:30
commitStatuses , lastStatus , err := pull_service . GetIssuesAllCommitStatus ( ctx , issues )
2021-04-15 23:04:43 +05:30
if err != nil {
2022-04-27 04:10:01 +05:30
ctx . ServerError ( "GetIssuesAllCommitStatus" , err )
2021-04-15 23:04:43 +05:30
return
2014-07-26 11:58:04 +05:30
}
2019-04-03 01:24:29 +05:30
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
if err := issues . LoadAttributes ( ctx ) ; err != nil {
ctx . ServerError ( "issues.LoadAttributes" , err )
return
}
2015-08-05 17:53:08 +05:30
ctx . Data [ "Issues" ] = issues
2022-04-27 04:10:01 +05:30
ctx . Data [ "CommitLastStatus" ] = lastStatus
ctx . Data [ "CommitStatuses" ] = commitStatuses
2015-08-05 17:53:08 +05:30
2018-11-29 07:16:30 +05:30
// Get assignees.
2023-04-07 05:41:02 +05:30
assigneeUsers , err := repo_model . GetRepoAssignees ( ctx , repo )
2015-08-05 17:53:08 +05:30
if err != nil {
2023-04-07 05:41:02 +05:30
ctx . ServerError ( "GetRepoAssignees" , err )
2022-08-09 01:33:58 +05:30
return
}
2023-08-25 16:37:42 +05:30
ctx . Data [ "Assignees" ] = MakeSelfOnTop ( ctx . Doer , assigneeUsers )
2022-08-09 01:33:58 +05:30
2020-12-21 21:09:28 +05:30
handleTeamMentions ( ctx )
if ctx . Written ( ) {
return
}
2022-06-13 15:07:59 +05:30
labels , err := issues_model . GetLabelsByRepoID ( ctx , repo . ID , "" , db . ListOptions { } )
2015-08-15 08:54:41 +05:30
if err != nil {
2018-11-29 07:16:30 +05:30
ctx . ServerError ( "GetLabelsByRepoID" , err )
2015-08-15 08:54:41 +05:30
return
}
Add Organization Wide Labels (#10814)
* Add organization wide labels
Implement organization wide labels similar to organization wide
webhooks. This lets you create individual labels for organizations that can be used
for all repos under that organization (so being able to reuse the same
label across multiple repos).
This makes it possible for small organizations with many repos to use
labels effectively.
Fixes #7406
* Add migration
* remove comments
* fix tests
* Update options/locale/locale_en-US.ini
Removed unused translation string
* show org labels in issue search label filter
* Use more clear var name
* rename migration after merge from master
* comment typo
* update migration again after rebase with master
* check for orgID <=0 per guillep2k review
* fmt
* Apply suggestions from code review
Co-Authored-By: guillep2k <18600385+guillep2k@users.noreply.github.com>
* remove unused code
* Make sure RepoID is 0 when searching orgID per code review
* more changes/code review requests
* More descriptive translation var per code review
* func description/delete comment when issue label deleted instead of hiding it
* remove comment
* only use issues in that repo when calculating number of open issues for org label on repo label page
* Add integration test for IssuesSearch API with labels
* remove unused function
* Update models/issue_label.go
Co-Authored-By: guillep2k <18600385+guillep2k@users.noreply.github.com>
* Use subquery in GetLabelIDsInReposByNames
* Fix tests to use correct orgID
* fix more tests
* IssuesSearch api now uses new BuildLabelNamesIssueIDsCondition. Add a few more tests as well
* update comment for clarity
* Revert previous code change now that we can use the new BuildLabelNamesIssueIDsCondition
* Don't sort repos by date in IssuesSearch API
After much debugging I've found a strange issue where in some cases MySQL will return a different result than other engines if a query is sorted by a null column. For example with our integration test data where we don't set updated_unix in repository fixtures:
SELECT `id`, `owner_id`, `owner_name`, `lower_name`, `name`, `description`, `website`, `original_service_type`, `original_url`, `default_branch`, `num_watches`, `num_stars`, `num_forks`, `num_issues`, `num_closed_issues`, `num_pulls`, `num_closed_pulls`, `num_milestones`, `num_closed_milestones`, `is_private`, `is_empty`, `is_archived`, `is_mirror`, `status`, `is_fork`, `fork_id`, `is_template`, `template_id`, `size`, `is_fsck_enabled`, `close_issues_via_commit_in_any_branch`, `topics`, `avatar`, `created_unix`, `updated_unix` FROM `repository` ORDER BY updated_unix DESC LIMIT 15 OFFSET 45
Returns different results for MySQL than other engines. However, the similar query:
SELECT `id`, `owner_id`, `owner_name`, `lower_name`, `name`, `description`, `website`, `original_service_type`, `original_url`, `default_branch`, `num_watches`, `num_stars`, `num_forks`, `num_issues`, `num_closed_issues`, `num_pulls`, `num_closed_pulls`, `num_milestones`, `num_closed_milestones`, `is_private`, `is_empty`, `is_archived`, `is_mirror`, `status`, `is_fork`, `fork_id`, `is_template`, `template_id`, `size`, `is_fsck_enabled`, `close_issues_via_commit_in_any_branch`, `topics`, `avatar`, `created_unix`, `updated_unix` FROM `repository` ORDER BY updated_unix DESC LIMIT 15 OFFSET 30
Returns the same results.
This causes integration tests to fail on MySQL in certain cases but would never show up in a real installation. Since this API call always returns issues based on the optionally provided repo_priority_id or the issueID itself, there is no change to results by changing the repo sorting method used to get ids earlier in the function.
* linter is back!
* code review
* remove now unused option
* Fix newline at end of files
* more unused code
* update to master
* check for matching ids before query
* Update models/issue_label.go
Co-Authored-By: 6543 <6543@obermui.de>
* Update models/issue_label.go
* update comments
* Update routers/org/setting.go
Co-authored-by: Lauris BH <lauris@nix.lv>
Co-authored-by: guillep2k <18600385+guillep2k@users.noreply.github.com>
Co-authored-by: 6543 <6543@obermui.de>
2020-04-01 09:44:46 +05:30
if repo . Owner . IsOrganization ( ) {
2022-06-13 15:07:59 +05:30
orgLabels , err := issues_model . GetLabelsByOrgID ( ctx , repo . Owner . ID , ctx . FormString ( "sort" ) , db . ListOptions { } )
Add Organization Wide Labels (#10814)
* Add organization wide labels
Implement organization wide labels similar to organization wide
webhooks. This lets you create individual labels for organizations that can be used
for all repos under that organization (so being able to reuse the same
label across multiple repos).
This makes it possible for small organizations with many repos to use
labels effectively.
Fixes #7406
* Add migration
* remove comments
* fix tests
* Update options/locale/locale_en-US.ini
Removed unused translation string
* show org labels in issue search label filter
* Use more clear var name
* rename migration after merge from master
* comment typo
* update migration again after rebase with master
* check for orgID <=0 per guillep2k review
* fmt
* Apply suggestions from code review
Co-Authored-By: guillep2k <18600385+guillep2k@users.noreply.github.com>
* remove unused code
* Make sure RepoID is 0 when searching orgID per code review
* more changes/code review requests
* More descriptive translation var per code review
* func description/delete comment when issue label deleted instead of hiding it
* remove comment
* only use issues in that repo when calculating number of open issues for org label on repo label page
* Add integration test for IssuesSearch API with labels
* remove unused function
* Update models/issue_label.go
Co-Authored-By: guillep2k <18600385+guillep2k@users.noreply.github.com>
* Use subquery in GetLabelIDsInReposByNames
* Fix tests to use correct orgID
* fix more tests
* IssuesSearch api now uses new BuildLabelNamesIssueIDsCondition. Add a few more tests as well
* update comment for clarity
* Revert previous code change now that we can use the new BuildLabelNamesIssueIDsCondition
* Don't sort repos by date in IssuesSearch API
After much debugging I've found a strange issue where in some cases MySQL will return a different result than other engines if a query is sorted by a null column. For example with our integration test data where we don't set updated_unix in repository fixtures:
SELECT `id`, `owner_id`, `owner_name`, `lower_name`, `name`, `description`, `website`, `original_service_type`, `original_url`, `default_branch`, `num_watches`, `num_stars`, `num_forks`, `num_issues`, `num_closed_issues`, `num_pulls`, `num_closed_pulls`, `num_milestones`, `num_closed_milestones`, `is_private`, `is_empty`, `is_archived`, `is_mirror`, `status`, `is_fork`, `fork_id`, `is_template`, `template_id`, `size`, `is_fsck_enabled`, `close_issues_via_commit_in_any_branch`, `topics`, `avatar`, `created_unix`, `updated_unix` FROM `repository` ORDER BY updated_unix DESC LIMIT 15 OFFSET 45
Returns different results for MySQL than other engines. However, the similar query:
SELECT `id`, `owner_id`, `owner_name`, `lower_name`, `name`, `description`, `website`, `original_service_type`, `original_url`, `default_branch`, `num_watches`, `num_stars`, `num_forks`, `num_issues`, `num_closed_issues`, `num_pulls`, `num_closed_pulls`, `num_milestones`, `num_closed_milestones`, `is_private`, `is_empty`, `is_archived`, `is_mirror`, `status`, `is_fork`, `fork_id`, `is_template`, `template_id`, `size`, `is_fsck_enabled`, `close_issues_via_commit_in_any_branch`, `topics`, `avatar`, `created_unix`, `updated_unix` FROM `repository` ORDER BY updated_unix DESC LIMIT 15 OFFSET 30
Returns the same results.
This causes integration tests to fail on MySQL in certain cases but would never show up in a real installation. Since this API call always returns issues based on the optionally provided repo_priority_id or the issueID itself, there is no change to results by changing the repo sorting method used to get ids earlier in the function.
* linter is back!
* code review
* remove now unused option
* Fix newline at end of files
* more unused code
* update to master
* check for matching ids before query
* Update models/issue_label.go
Co-Authored-By: 6543 <6543@obermui.de>
* Update models/issue_label.go
* update comments
* Update routers/org/setting.go
Co-authored-by: Lauris BH <lauris@nix.lv>
Co-authored-by: guillep2k <18600385+guillep2k@users.noreply.github.com>
Co-authored-by: 6543 <6543@obermui.de>
2020-04-01 09:44:46 +05:30
if err != nil {
ctx . ServerError ( "GetLabelsByOrgID" , err )
return
}
ctx . Data [ "OrgLabels" ] = orgLabels
labels = append ( labels , orgLabels ... )
}
Scoped labels (#22585)
Add a new "exclusive" option per label. This makes it so that when the
label is named `scope/name`, no other label with the same `scope/`
prefix can be set on an issue.
The scope is determined by the last occurrence of `/`, so for example
`scope/alpha/name` and `scope/beta/name` are considered to be in
different scopes and can coexist.
Exclusive scopes are not enforced by any database rules, however they
are enforced when editing labels at the models level, automatically
removing any existing labels in the same scope when either attaching a
new label or replacing all labels.
In menus use a circle instead of checkbox to indicate they function as
radio buttons per scope. Issue filtering by label ensures that only a
single scoped label is selected at a time. Clicking with alt key can be
used to remove a scoped label, both when editing individual issues and
batch editing.
Label rendering refactor for consistency and code simplification:
* Labels now consistently have the same shape, emojis and tooltips
everywhere. This includes the label list and label assignment menus.
* In label list, show description below label same as label menus.
* Don't use exactly black/white text colors to look a bit nicer.
* Simplify text color computation. There is no point computing luminance
in linear color space, as this is a perceptual problem and sRGB is
closer to perceptually linear.
* Increase height of label assignment menus to show more labels. Showing
only 3-4 labels at a time leads to a lot of scrolling.
* Render all labels with a new RenderLabel template helper function.
Label creation and editing in multiline modal menu:
* Change label creation to open a modal menu like label editing.
* Change menu layout to place name, description and colors on separate
lines.
* Don't color cancel button red in label editing modal menu.
* Align text to the left in model menu for better readability and
consistent with settings layout elsewhere.
Custom exclusive scoped label rendering:
* Display scoped label prefix and suffix with slightly darker and
lighter background color respectively, and a slanted edge between them
similar to the `/` symbol.
* In menus exclusive labels are grouped with a divider line.
---------
Co-authored-by: Yarden Shoham <hrsi88@gmail.com>
Co-authored-by: Lauris BH <lauris@nix.lv>
2023-02-19 00:47:39 +05:30
// Get the exclusive scope for every label ID
labelExclusiveScopes := make ( [ ] string , 0 , len ( labelIDs ) )
for _ , labelID := range labelIDs {
foundExclusiveScope := false
for _ , label := range labels {
if label . ID == labelID || label . ID == - labelID {
labelExclusiveScopes = append ( labelExclusiveScopes , label . ExclusiveScope ( ) )
foundExclusiveScope = true
break
}
}
if ! foundExclusiveScope {
labelExclusiveScopes = append ( labelExclusiveScopes , "" )
}
}
2019-01-23 09:40:38 +05:30
for _ , l := range labels {
Scoped labels (#22585)
Add a new "exclusive" option per label. This makes it so that when the
label is named `scope/name`, no other label with the same `scope/`
prefix can be set on an issue.
The scope is determined by the last occurrence of `/`, so for example
`scope/alpha/name` and `scope/beta/name` are considered to be in
different scopes and can coexist.
Exclusive scopes are not enforced by any database rules, however they
are enforced when editing labels at the models level, automatically
removing any existing labels in the same scope when either attaching a
new label or replacing all labels.
In menus use a circle instead of checkbox to indicate they function as
radio buttons per scope. Issue filtering by label ensures that only a
single scoped label is selected at a time. Clicking with alt key can be
used to remove a scoped label, both when editing individual issues and
batch editing.
Label rendering refactor for consistency and code simplification:
* Labels now consistently have the same shape, emojis and tooltips
everywhere. This includes the label list and label assignment menus.
* In label list, show description below label same as label menus.
* Don't use exactly black/white text colors to look a bit nicer.
* Simplify text color computation. There is no point computing luminance
in linear color space, as this is a perceptual problem and sRGB is
closer to perceptually linear.
* Increase height of label assignment menus to show more labels. Showing
only 3-4 labels at a time leads to a lot of scrolling.
* Render all labels with a new RenderLabel template helper function.
Label creation and editing in multiline modal menu:
* Change label creation to open a modal menu like label editing.
* Change menu layout to place name, description and colors on separate
lines.
* Don't color cancel button red in label editing modal menu.
* Align text to the left in model menu for better readability and
consistent with settings layout elsewhere.
Custom exclusive scoped label rendering:
* Display scoped label prefix and suffix with slightly darker and
lighter background color respectively, and a slanted edge between them
similar to the `/` symbol.
* In menus exclusive labels are grouped with a divider line.
---------
Co-authored-by: Yarden Shoham <hrsi88@gmail.com>
Co-authored-by: Lauris BH <lauris@nix.lv>
2023-02-19 00:47:39 +05:30
l . LoadSelectedLabelsAfterClick ( labelIDs , labelExclusiveScopes )
2019-01-23 09:40:38 +05:30
}
2018-11-29 07:16:30 +05:30
ctx . Data [ "Labels" ] = labels
2019-01-23 09:40:38 +05:30
ctx . Data [ "NumLabels" ] = len ( labels )
2014-07-26 11:58:04 +05:30
2021-07-29 07:12:15 +05:30
if ctx . FormInt64 ( "assignee" ) == 0 {
2016-07-17 06:55:30 +05:30
assigneeID = 0 // Reset ID to prevent unexpected selection of assignee.
}
2022-01-20 23:16:10 +05:30
ctx . Data [ "IssueRefEndNames" ] , ctx . Data [ "IssueRefURLs" ] = issue_service . GetRefEndNamesAndURLs ( issues , ctx . Repo . RepoLink )
2020-05-15 04:25:43 +05:30
2020-03-06 09:14:06 +05:30
ctx . Data [ "ApprovalCounts" ] = func ( issueID int64 , typ string ) int64 {
counts , ok := approvalCounts [ issueID ]
if ! ok || len ( counts ) == 0 {
return 0
}
2022-06-13 15:07:59 +05:30
reviewTyp := issues_model . ReviewTypeApprove
2020-03-06 09:14:06 +05:30
if typ == "reject" {
2022-06-13 15:07:59 +05:30
reviewTyp = issues_model . ReviewTypeReject
2020-04-06 22:03:34 +05:30
} else if typ == "waiting" {
2022-06-13 15:07:59 +05:30
reviewTyp = issues_model . ReviewTypeRequest
2020-03-06 09:14:06 +05:30
}
for _ , count := range counts {
if count . Type == reviewTyp {
return count . Count
}
}
return 0
}
2021-10-08 03:30:02 +05:30
2023-02-04 20:05:08 +05:30
retrieveProjects ( ctx , repo )
if ctx . Written ( ) {
2023-01-29 09:15:29 +05:30
return
2021-10-08 03:30:02 +05:30
}
2024-03-02 21:12:31 +05:30
pinned , err := issues_model . GetPinnedIssues ( ctx , repo . ID , isPullOption . Value ( ) )
2023-05-25 18:47:19 +05:30
if err != nil {
ctx . ServerError ( "GetPinnedIssues" , err )
return
}
ctx . Data [ "PinnedIssues" ] = pinned
ctx . Data [ "IsRepoAdmin" ] = ctx . IsSigned && ( ctx . Repo . IsAdmin ( ) || ctx . Doer . IsAdmin )
2014-07-26 11:58:04 +05:30
ctx . Data [ "IssueStats" ] = issueStats
2023-10-18 05:33:42 +05:30
ctx . Data [ "OpenCount" ] = issueStats . OpenCount
ctx . Data [ "ClosedCount" ] = issueStats . ClosedCount
linkStr := "%s?q=%s&type=%s&sort=%s&state=%s&labels=%s&milestone=%d&project=%d&assignee=%d&poster=%d&archived=%t"
2024-01-15 20:37:22 +05:30
ctx . Data [ "AllStatesLink" ] = fmt . Sprintf ( linkStr , ctx . Link ,
url . QueryEscape ( keyword ) , url . QueryEscape ( viewType ) , url . QueryEscape ( sortType ) , "all" , url . QueryEscape ( selectLabels ) ,
2024-03-19 10:16:40 +05:30
milestoneID , projectID , assigneeID , posterID , archived )
2023-10-18 05:33:42 +05:30
ctx . Data [ "OpenLink" ] = fmt . Sprintf ( linkStr , ctx . Link ,
url . QueryEscape ( keyword ) , url . QueryEscape ( viewType ) , url . QueryEscape ( sortType ) , "open" , url . QueryEscape ( selectLabels ) ,
2024-03-19 10:16:40 +05:30
milestoneID , projectID , assigneeID , posterID , archived )
2023-10-18 05:33:42 +05:30
ctx . Data [ "ClosedLink" ] = fmt . Sprintf ( linkStr , ctx . Link ,
url . QueryEscape ( keyword ) , url . QueryEscape ( viewType ) , url . QueryEscape ( sortType ) , "closed" , url . QueryEscape ( selectLabels ) ,
2024-03-19 10:16:40 +05:30
milestoneID , projectID , assigneeID , posterID , archived )
2019-12-28 20:13:46 +05:30
ctx . Data [ "SelLabelIDs" ] = labelIDs
ctx . Data [ "SelectLabels" ] = selectLabels
2014-07-26 11:58:04 +05:30
ctx . Data [ "ViewType" ] = viewType
2015-08-15 09:37:08 +05:30
ctx . Data [ "SortType" ] = sortType
2015-08-05 17:53:08 +05:30
ctx . Data [ "MilestoneID" ] = milestoneID
2023-01-29 09:15:29 +05:30
ctx . Data [ "ProjectID" ] = projectID
2015-08-15 08:54:41 +05:30
ctx . Data [ "AssigneeID" ] = assigneeID
2022-08-09 01:33:58 +05:30
ctx . Data [ "PosterID" ] = posterID
2024-06-18 00:28:24 +05:30
ctx . Data [ "IsFuzzy" ] = isFuzzy
2017-01-25 08:13:02 +05:30
ctx . Data [ "Keyword" ] = keyword
2024-03-02 21:12:31 +05:30
switch {
case isShowClosed . Value ( ) :
2014-07-26 11:58:04 +05:30
ctx . Data [ "State" ] = "closed"
2024-03-02 21:12:31 +05:30
case ! isShowClosed . Has ( ) :
2024-01-15 20:37:22 +05:30
ctx . Data [ "State" ] = "all"
default :
2015-07-25 00:22:25 +05:30
ctx . Data [ "State" ] = "open"
2014-07-26 11:58:04 +05:30
}
2023-10-18 05:33:42 +05:30
ctx . Data [ "ShowArchivedLabels" ] = archived
2019-04-20 09:45:19 +05:30
pager . AddParam ( ctx , "q" , "Keyword" )
pager . AddParam ( ctx , "type" , "ViewType" )
pager . AddParam ( ctx , "sort" , "SortType" )
pager . AddParam ( ctx , "state" , "State" )
pager . AddParam ( ctx , "labels" , "SelectLabels" )
pager . AddParam ( ctx , "milestone" , "MilestoneID" )
2023-01-29 09:15:29 +05:30
pager . AddParam ( ctx , "project" , "ProjectID" )
2019-04-20 09:45:19 +05:30
pager . AddParam ( ctx , "assignee" , "AssigneeID" )
2022-08-09 01:33:58 +05:30
pager . AddParam ( ctx , "poster" , "PosterID" )
2023-10-18 05:33:42 +05:30
pager . AddParam ( ctx , "archived" , "ShowArchivedLabels" )
2024-06-18 00:28:24 +05:30
pager . AddParam ( ctx , "fuzzy" , "IsFuzzy" )
2023-10-01 18:34:39 +05:30
2019-04-20 09:45:19 +05:30
ctx . Data [ "Page" ] = pager
2018-11-29 07:16:30 +05:30
}
2024-06-18 00:28:24 +05:30
func issueIDsFromSearch ( ctx * context . Context , keyword string , fuzzy bool , opts * issues_model . IssuesOptions ) ( [ ] int64 , error ) {
ids , _ , err := issue_indexer . SearchIssues ( ctx , issue_indexer . ToSearchOptions ( keyword , opts ) . Copy (
func ( o * issue_indexer . SearchOptions ) {
o . IsFuzzyKeyword = fuzzy
} ,
) )
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
if err != nil {
return nil , fmt . Errorf ( "SearchIssues: %w" , err )
}
return ids , nil
}
2018-11-29 07:16:30 +05:30
// Issues render issues page
func Issues ( ctx * context . Context ) {
isPullList := ctx . Params ( ":type" ) == "pulls"
if isPullList {
MustAllowPulls ( ctx )
if ctx . Written ( ) {
return
}
ctx . Data [ "Title" ] = ctx . Tr ( "repo.pulls" )
ctx . Data [ "PageIsPullList" ] = true
} else {
MustEnableIssues ( ctx )
if ctx . Written ( ) {
return
}
ctx . Data [ "Title" ] = ctx . Tr ( "repo.issues" )
ctx . Data [ "PageIsIssueList" ] = true
2023-05-09 05:00:14 +05:30
ctx . Data [ "NewIssueChooseTemplate" ] = issue_service . HasTemplatesOrContactLinks ( ctx . Repo . Repository , ctx . Repo . GitRepo )
2018-11-29 07:16:30 +05:30
}
2024-03-02 21:12:31 +05:30
issues ( ctx , ctx . FormInt64 ( "milestone" ) , ctx . FormInt64 ( "project" ) , optional . Some ( isPullList ) )
2021-05-09 08:03:49 +05:30
if ctx . Written ( ) {
return
}
2018-11-29 07:16:30 +05:30
2023-04-30 18:42:49 +05:30
renderMilestones ( ctx )
if ctx . Written ( ) {
return
}
ctx . Data [ "CanWriteIssuesOrPulls" ] = ctx . Repo . CanWriteIssuesOrPulls ( isPullList )
ctx . HTML ( http . StatusOK , tplIssues )
}
func renderMilestones ( ctx * context . Context ) {
2020-07-28 17:00:40 +05:30
// Get milestones
2023-12-11 14:26:48 +05:30
milestones , err := db . Find [ issues_model . Milestone ] ( ctx , issues_model . FindMilestoneOptions {
2020-07-28 17:00:40 +05:30
RepoID : ctx . Repo . Repository . ID ,
} )
2018-11-29 07:16:30 +05:30
if err != nil {
ctx . ServerError ( "GetAllRepoMilestones" , err )
return
}
2015-07-25 00:22:25 +05:30
2023-04-30 18:42:49 +05:30
openMilestones , closedMilestones := issues_model . MilestoneList { } , issues_model . MilestoneList { }
for _ , milestone := range milestones {
if milestone . IsClosed {
closedMilestones = append ( closedMilestones , milestone )
} else {
openMilestones = append ( openMilestones , milestone )
}
}
ctx . Data [ "OpenMilestones" ] = openMilestones
ctx . Data [ "ClosedMilestones" ] = closedMilestones
2014-07-26 11:58:04 +05:30
}
2016-11-24 12:34:31 +05:30
// RetrieveRepoMilestonesAndAssignees find all the milestones and assignees of a repository
2021-12-10 06:57:50 +05:30
func RetrieveRepoMilestonesAndAssignees ( ctx * context . Context , repo * repo_model . Repository ) {
2015-09-02 04:37:02 +05:30
var err error
2023-12-11 14:26:48 +05:30
ctx . Data [ "OpenMilestones" ] , err = db . Find [ issues_model . Milestone ] ( ctx , issues_model . FindMilestoneOptions {
RepoID : repo . ID ,
2024-03-02 21:12:31 +05:30
IsClosed : optional . Some ( false ) ,
2020-07-28 17:00:40 +05:30
} )
2015-09-02 04:37:02 +05:30
if err != nil {
2018-01-11 03:04:17 +05:30
ctx . ServerError ( "GetMilestones" , err )
2015-09-02 04:37:02 +05:30
return
}
2023-12-11 14:26:48 +05:30
ctx . Data [ "ClosedMilestones" ] , err = db . Find [ issues_model . Milestone ] ( ctx , issues_model . FindMilestoneOptions {
RepoID : repo . ID ,
2024-03-02 21:12:31 +05:30
IsClosed : optional . Some ( true ) ,
2020-07-28 17:00:40 +05:30
} )
2015-09-02 04:37:02 +05:30
if err != nil {
2018-01-11 03:04:17 +05:30
ctx . ServerError ( "GetMilestones" , err )
2015-09-02 04:37:02 +05:30
return
}
2023-04-07 05:41:02 +05:30
assigneeUsers , err := repo_model . GetRepoAssignees ( ctx , repo )
2015-09-02 04:37:02 +05:30
if err != nil {
2023-04-07 05:41:02 +05:30
ctx . ServerError ( "GetRepoAssignees" , err )
2015-09-02 04:37:02 +05:30
return
}
2023-08-25 16:37:42 +05:30
ctx . Data [ "Assignees" ] = MakeSelfOnTop ( ctx . Doer , assigneeUsers )
2020-12-21 21:09:28 +05:30
handleTeamMentions ( ctx )
2015-09-02 04:37:02 +05:30
}
2021-12-10 06:57:50 +05:30
func retrieveProjects ( ctx * context . Context , repo * repo_model . Repository ) {
2023-07-29 21:05:53 +05:30
// Distinguish whether the owner of the repository
// is an individual or an organization
repoOwnerType := project_model . TypeIndividual
if repo . Owner . IsOrganization ( ) {
repoOwnerType = project_model . TypeOrganization
}
2020-08-17 08:37:38 +05:30
var err error
2023-11-24 09:19:41 +05:30
projects , err := db . Find [ project_model . Project ] ( ctx , project_model . SearchOptions {
ListOptions : db . ListOptionsAll ,
RepoID : repo . ID ,
2024-03-02 21:12:31 +05:30
IsClosed : optional . Some ( false ) ,
2023-11-24 09:19:41 +05:30
Type : project_model . TypeRepository ,
2020-08-17 08:37:38 +05:30
} )
if err != nil {
ctx . ServerError ( "GetProjects" , err )
return
}
2023-11-24 09:19:41 +05:30
projects2 , err := db . Find [ project_model . Project ] ( ctx , project_model . SearchOptions {
ListOptions : db . ListOptionsAll ,
OwnerID : repo . OwnerID ,
2024-03-02 21:12:31 +05:30
IsClosed : optional . Some ( false ) ,
2023-11-24 09:19:41 +05:30
Type : repoOwnerType ,
2023-01-20 17:12:33 +05:30
} )
if err != nil {
ctx . ServerError ( "GetProjects" , err )
return
}
ctx . Data [ "OpenProjects" ] = append ( projects , projects2 ... )
2020-08-17 08:37:38 +05:30
2023-11-24 09:19:41 +05:30
projects , err = db . Find [ project_model . Project ] ( ctx , project_model . SearchOptions {
ListOptions : db . ListOptionsAll ,
RepoID : repo . ID ,
2024-03-02 21:12:31 +05:30
IsClosed : optional . Some ( true ) ,
2023-11-24 09:19:41 +05:30
Type : project_model . TypeRepository ,
2020-08-17 08:37:38 +05:30
} )
if err != nil {
ctx . ServerError ( "GetProjects" , err )
return
}
2023-11-24 09:19:41 +05:30
projects2 , err = db . Find [ project_model . Project ] ( ctx , project_model . SearchOptions {
ListOptions : db . ListOptionsAll ,
OwnerID : repo . OwnerID ,
2024-03-02 21:12:31 +05:30
IsClosed : optional . Some ( true ) ,
2023-11-24 09:19:41 +05:30
Type : repoOwnerType ,
2023-01-20 17:12:33 +05:30
} )
if err != nil {
ctx . ServerError ( "GetProjects" , err )
return
}
ctx . Data [ "ClosedProjects" ] = append ( projects , projects2 ... )
2020-08-17 08:37:38 +05:30
}
2020-10-13 01:25:13 +05:30
// repoReviewerSelection items to be shown in the reviewer selection list of
// a pull request. An item represents either a single user or a whole team.
type repoReviewerSelection struct {
	// IsTeam is true when the entry represents a team rather than a user.
	IsTeam bool
	// Team is set when IsTeam is true.
	Team *organization.Team
	// User is set when IsTeam is false.
	User *user_model.User
	// Review is the existing review of this reviewer, if any.
	Review *issues_model.Review
	// CanChange reports whether the current doer may add/remove this
	// review request.
	CanChange bool
	// Checked is true when a review is currently requested from this
	// reviewer.
	Checked bool
	// ItemID is the reviewer's user ID for users, or the negated team ID
	// for teams.
	ItemID int64
}
2020-04-06 22:03:34 +05:30
// RetrieveRepoReviewers find all reviewers of a repository
2022-06-13 15:07:59 +05:30
func RetrieveRepoReviewers ( ctx * context . Context , repo * repo_model . Repository , issue * issues_model . Issue , canChooseReviewer bool ) {
2020-10-13 01:25:13 +05:30
ctx . Data [ "CanChooseReviewer" ] = canChooseReviewer
2023-09-25 18:47:37 +05:30
originalAuthorReviews , err := issues_model . GetReviewersFromOriginalAuthorsByIssueID ( ctx , issue . ID )
2020-10-14 17:41:11 +05:30
if err != nil {
ctx . ServerError ( "GetReviewersFromOriginalAuthorsByIssueID" , err )
return
}
ctx . Data [ "OriginalReviews" ] = originalAuthorReviews
2023-09-25 18:47:37 +05:30
reviews , err := issues_model . GetReviewsByIssueID ( ctx , issue . ID )
2020-04-06 22:03:34 +05:30
if err != nil {
2020-10-13 01:25:13 +05:30
ctx . ServerError ( "GetReviewersByIssueID" , err )
return
}
if len ( reviews ) == 0 && ! canChooseReviewer {
2020-04-06 22:03:34 +05:30
return
}
2020-10-13 01:25:13 +05:30
var (
pullReviews [ ] * repoReviewerSelection
reviewersResult [ ] * repoReviewerSelection
teamReviewersResult [ ] * repoReviewerSelection
2022-03-29 11:59:02 +05:30
teamReviewers [ ] * organization . Team
2021-11-24 15:19:20 +05:30
reviewers [ ] * user_model . User
2020-10-13 01:25:13 +05:30
)
if canChooseReviewer {
posterID := issue . PosterID
if issue . OriginalAuthorID > 0 {
posterID = 0
}
2022-06-06 13:31:49 +05:30
reviewers , err = repo_model . GetReviewers ( ctx , repo , ctx . Doer . ID , posterID )
2020-10-13 01:25:13 +05:30
if err != nil {
ctx . ServerError ( "GetReviewers" , err )
return
}
2023-03-01 03:47:51 +05:30
teamReviewers , err = repo_service . GetReviewerTeams ( ctx , repo )
2020-10-13 01:25:13 +05:30
if err != nil {
ctx . ServerError ( "GetReviewerTeams" , err )
return
}
if len ( reviewers ) > 0 {
reviewersResult = make ( [ ] * repoReviewerSelection , 0 , len ( reviewers ) )
}
if len ( teamReviewers ) > 0 {
teamReviewersResult = make ( [ ] * repoReviewerSelection , 0 , len ( teamReviewers ) )
}
}
pullReviews = make ( [ ] * repoReviewerSelection , 0 , len ( reviews ) )
for _ , review := range reviews {
tmp := & repoReviewerSelection {
2022-06-13 15:07:59 +05:30
Checked : review . Type == issues_model . ReviewTypeRequest ,
2020-10-13 01:25:13 +05:30
Review : review ,
ItemID : review . ReviewerID ,
}
if review . ReviewerTeamID > 0 {
tmp . IsTeam = true
tmp . ItemID = - review . ReviewerTeamID
}
2024-02-24 18:08:43 +05:30
if canChooseReviewer {
// Users who can choose reviewers can also remove review requests
2020-10-13 01:25:13 +05:30
tmp . CanChange = true
2022-06-13 15:07:59 +05:30
} else if ctx . Doer != nil && ctx . Doer . ID == review . ReviewerID && review . Type == issues_model . ReviewTypeRequest {
2020-10-13 01:25:13 +05:30
// A user can refuse review requests
tmp . CanChange = true
}
pullReviews = append ( pullReviews , tmp )
if canChooseReviewer {
if tmp . IsTeam {
teamReviewersResult = append ( teamReviewersResult , tmp )
} else {
reviewersResult = append ( reviewersResult , tmp )
}
}
}
if len ( pullReviews ) > 0 {
// Drop all non-existing users and teams from the reviews
currentPullReviewers := make ( [ ] * repoReviewerSelection , 0 , len ( pullReviews ) )
for _ , item := range pullReviews {
if item . Review . ReviewerID > 0 {
2022-11-19 13:42:33 +05:30
if err = item . Review . LoadReviewer ( ctx ) ; err != nil {
2021-11-24 15:19:20 +05:30
if user_model . IsErrUserNotExist ( err ) {
2020-10-13 01:25:13 +05:30
continue
}
ctx . ServerError ( "LoadReviewer" , err )
return
}
item . User = item . Review . Reviewer
} else if item . Review . ReviewerTeamID > 0 {
2022-11-19 13:42:33 +05:30
if err = item . Review . LoadReviewerTeam ( ctx ) ; err != nil {
2022-03-29 11:59:02 +05:30
if organization . IsErrTeamNotExist ( err ) {
2020-10-13 01:25:13 +05:30
continue
}
ctx . ServerError ( "LoadReviewerTeam" , err )
return
}
item . Team = item . Review . ReviewerTeam
} else {
continue
}
currentPullReviewers = append ( currentPullReviewers , item )
}
ctx . Data [ "PullReviewers" ] = currentPullReviewers
}
if canChooseReviewer && reviewersResult != nil {
preadded := len ( reviewersResult )
for _ , reviewer := range reviewers {
found := false
reviewAddLoop :
for _ , tmp := range reviewersResult [ : preadded ] {
if tmp . ItemID == reviewer . ID {
tmp . User = reviewer
found = true
break reviewAddLoop
}
}
if found {
continue
}
reviewersResult = append ( reviewersResult , & repoReviewerSelection {
IsTeam : false ,
CanChange : true ,
User : reviewer ,
ItemID : reviewer . ID ,
} )
}
ctx . Data [ "Reviewers" ] = reviewersResult
}
if canChooseReviewer && teamReviewersResult != nil {
preadded := len ( teamReviewersResult )
for _ , team := range teamReviewers {
found := false
teamReviewAddLoop :
for _ , tmp := range teamReviewersResult [ : preadded ] {
if tmp . ItemID == - team . ID {
tmp . Team = team
found = true
break teamReviewAddLoop
}
}
if found {
continue
}
teamReviewersResult = append ( teamReviewersResult , & repoReviewerSelection {
IsTeam : true ,
CanChange : true ,
Team : team ,
ItemID : - team . ID ,
} )
}
ctx . Data [ "TeamReviewers" ] = teamReviewersResult
}
2020-04-06 22:03:34 +05:30
}
2016-11-24 12:34:31 +05:30
// RetrieveRepoMetas find all the meta information of a repository
2022-06-13 15:07:59 +05:30
func RetrieveRepoMetas ( ctx * context . Context , repo * repo_model . Repository , isPull bool ) [ ] * issues_model . Label {
2020-01-20 17:30:32 +05:30
if ! ctx . Repo . CanWriteIssuesOrPulls ( isPull ) {
2015-08-31 12:54:28 +05:30
return nil
}
2022-06-13 15:07:59 +05:30
labels , err := issues_model . GetLabelsByRepoID ( ctx , repo . ID , "" , db . ListOptions { } )
2015-08-31 12:54:28 +05:30
if err != nil {
2018-01-11 03:04:17 +05:30
ctx . ServerError ( "GetLabelsByRepoID" , err )
2015-08-31 12:54:28 +05:30
return nil
}
ctx . Data [ "Labels" ] = labels
Add Organization Wide Labels (#10814)
* Add organization wide labels
Implement organization wide labels similar to organization wide
webhooks. This lets you create individual labels for organizations that can be used
for all repos under that organization (so being able to reuse the same
label across multiple repos).
This makes it possible for small organizations with many repos to use
labels effectively.
Fixes #7406
* Add migration
* remove comments
* fix tests
* Update options/locale/locale_en-US.ini
Removed unused translation string
* show org labels in issue search label filter
* Use more clear var name
* rename migration after merge from master
* comment typo
* update migration again after rebase with master
* check for orgID <=0 per guillep2k review
* fmt
* Apply suggestions from code review
Co-Authored-By: guillep2k <18600385+guillep2k@users.noreply.github.com>
* remove unused code
* Make sure RepoID is 0 when searching orgID per code review
* more changes/code review requests
* More descriptive translation var per code review
* func description/delete comment when issue label deleted instead of hiding it
* remove comment
* only use issues in that repo when calculating number of open issues for org label on repo label page
* Add integration test for IssuesSearch API with labels
* remove unused function
* Update models/issue_label.go
Co-Authored-By: guillep2k <18600385+guillep2k@users.noreply.github.com>
* Use subquery in GetLabelIDsInReposByNames
* Fix tests to use correct orgID
* fix more tests
* IssuesSearch api now uses new BuildLabelNamesIssueIDsCondition. Add a few more tests as well
* update comment for clarity
* Revert previous code change now that we can use the new BuildLabelNamesIssueIDsCondition
* Don't sort repos by date in IssuesSearch API
After much debugging I've found a strange issue where in some cases MySQL will return a different result than other enigines if a query is sorted by a null collumn. For example with our integration test data where we don't set updated_unix in repository fixtures:
SELECT `id`, `owner_id`, `owner_name`, `lower_name`, `name`, `description`, `website`, `original_service_type`, `original_url`, `default_branch`, `num_watches`, `num_stars`, `num_forks`, `num_issues`, `num_closed_issues`, `num_pulls`, `num_closed_pulls`, `num_milestones`, `num_closed_milestones`, `is_private`, `is_empty`, `is_archived`, `is_mirror`, `status`, `is_fork`, `fork_id`, `is_template`, `template_id`, `size`, `is_fsck_enabled`, `close_issues_via_commit_in_any_branch`, `topics`, `avatar`, `created_unix`, `updated_unix` FROM `repository` ORDER BY updated_unix DESC LIMIT 15 OFFSET 45
Returns different results for MySQL than other engines. However, the similar query:
SELECT `id`, `owner_id`, `owner_name`, `lower_name`, `name`, `description`, `website`, `original_service_type`, `original_url`, `default_branch`, `num_watches`, `num_stars`, `num_forks`, `num_issues`, `num_closed_issues`, `num_pulls`, `num_closed_pulls`, `num_milestones`, `num_closed_milestones`, `is_private`, `is_empty`, `is_archived`, `is_mirror`, `status`, `is_fork`, `fork_id`, `is_template`, `template_id`, `size`, `is_fsck_enabled`, `close_issues_via_commit_in_any_branch`, `topics`, `avatar`, `created_unix`, `updated_unix` FROM `repository` ORDER BY updated_unix DESC LIMIT 15 OFFSET 30
Returns the same results.
This causes integration tests to fail on MySQL in certain cases but would never show up in a real installation. Since this API call always returns issues based on the optionally provided repo_priority_id or the issueID itself, there is no change to results by changing the repo sorting method used to get ids earlier in the function.
* linter is back!
* code review
* remove now unused option
* Fix newline at end of files
* more unused code
* update to master
* check for matching ids before query
* Update models/issue_label.go
Co-Authored-By: 6543 <6543@obermui.de>
* Update models/issue_label.go
* update comments
* Update routers/org/setting.go
Co-authored-by: Lauris BH <lauris@nix.lv>
Co-authored-by: guillep2k <18600385+guillep2k@users.noreply.github.com>
Co-authored-by: 6543 <6543@obermui.de>
2020-04-01 09:44:46 +05:30
if repo . Owner . IsOrganization ( ) {
2022-06-13 15:07:59 +05:30
orgLabels , err := issues_model . GetLabelsByOrgID ( ctx , repo . Owner . ID , ctx . FormString ( "sort" ) , db . ListOptions { } )
Add Organization Wide Labels (#10814)
* Add organization wide labels
Implement organization wide labels similar to organization wide
webhooks. This lets you create individual labels for organizations that can be used
for all repos under that organization (so being able to reuse the same
label across multiple repos).
This makes it possible for small organizations with many repos to use
labels effectively.
Fixes #7406
* Add migration
* remove comments
* fix tests
* Update options/locale/locale_en-US.ini
Removed unused translation string
* show org labels in issue search label filter
* Use more clear var name
* rename migration after merge from master
* comment typo
* update migration again after rebase with master
* check for orgID <=0 per guillep2k review
* fmt
* Apply suggestions from code review
Co-Authored-By: guillep2k <18600385+guillep2k@users.noreply.github.com>
* remove unused code
* Make sure RepoID is 0 when searching orgID per code review
* more changes/code review requests
* More descriptive translation var per code review
* func description/delete comment when issue label deleted instead of hiding it
* remove comment
* only use issues in that repo when calculating number of open issues for org label on repo label page
* Add integration test for IssuesSearch API with labels
* remove unused function
* Update models/issue_label.go
Co-Authored-By: guillep2k <18600385+guillep2k@users.noreply.github.com>
* Use subquery in GetLabelIDsInReposByNames
* Fix tests to use correct orgID
* fix more tests
* IssuesSearch api now uses new BuildLabelNamesIssueIDsCondition. Add a few more tests as well
* update comment for clarity
* Revert previous code change now that we can use the new BuildLabelNamesIssueIDsCondition
* Don't sort repos by date in IssuesSearch API
After much debugging I've found a strange issue where in some cases MySQL will return a different result than other enigines if a query is sorted by a null collumn. For example with our integration test data where we don't set updated_unix in repository fixtures:
SELECT `id`, `owner_id`, `owner_name`, `lower_name`, `name`, `description`, `website`, `original_service_type`, `original_url`, `default_branch`, `num_watches`, `num_stars`, `num_forks`, `num_issues`, `num_closed_issues`, `num_pulls`, `num_closed_pulls`, `num_milestones`, `num_closed_milestones`, `is_private`, `is_empty`, `is_archived`, `is_mirror`, `status`, `is_fork`, `fork_id`, `is_template`, `template_id`, `size`, `is_fsck_enabled`, `close_issues_via_commit_in_any_branch`, `topics`, `avatar`, `created_unix`, `updated_unix` FROM `repository` ORDER BY updated_unix DESC LIMIT 15 OFFSET 45
Returns different results for MySQL than other engines. However, the similar query:
SELECT `id`, `owner_id`, `owner_name`, `lower_name`, `name`, `description`, `website`, `original_service_type`, `original_url`, `default_branch`, `num_watches`, `num_stars`, `num_forks`, `num_issues`, `num_closed_issues`, `num_pulls`, `num_closed_pulls`, `num_milestones`, `num_closed_milestones`, `is_private`, `is_empty`, `is_archived`, `is_mirror`, `status`, `is_fork`, `fork_id`, `is_template`, `template_id`, `size`, `is_fsck_enabled`, `close_issues_via_commit_in_any_branch`, `topics`, `avatar`, `created_unix`, `updated_unix` FROM `repository` ORDER BY updated_unix DESC LIMIT 15 OFFSET 30
Returns the same results.
This causes integration tests to fail on MySQL in certain cases but would never show up in a real installation. Since this API call always returns issues based on the optionally provided repo_priority_id or the issueID itself, there is no change to results by changing the repo sorting method used to get ids earlier in the function.
* linter is back!
* code review
* remove now unused option
* Fix newline at end of files
* more unused code
* update to master
* check for matching ids before query
* Update models/issue_label.go
Co-Authored-By: 6543 <6543@obermui.de>
* Update models/issue_label.go
* update comments
* Update routers/org/setting.go
Co-authored-by: Lauris BH <lauris@nix.lv>
Co-authored-by: guillep2k <18600385+guillep2k@users.noreply.github.com>
Co-authored-by: 6543 <6543@obermui.de>
2020-04-01 09:44:46 +05:30
if err != nil {
return nil
}
ctx . Data [ "OrgLabels" ] = orgLabels
labels = append ( labels , orgLabels ... )
}
2015-08-31 12:54:28 +05:30
2015-09-02 04:37:02 +05:30
RetrieveRepoMilestonesAndAssignees ( ctx , repo )
if ctx . Written ( ) {
2015-08-31 12:54:28 +05:30
return nil
}
2020-08-17 08:37:38 +05:30
retrieveProjects ( ctx , repo )
if ctx . Written ( ) {
return nil
}
2023-07-21 16:50:04 +05:30
PrepareBranchList ( ctx )
if ctx . Written ( ) {
2017-08-24 18:00:27 +05:30
return nil
}
2018-07-18 02:53:58 +05:30
// Contains true if the user can create issue dependencies
2023-10-14 14:07:24 +05:30
ctx . Data [ "CanCreateIssueDependencies" ] = ctx . Repo . CanCreateIssueDependencies ( ctx , ctx . Doer , isPull )
2018-07-18 02:53:58 +05:30
2015-08-31 12:54:28 +05:30
return labels
}
2023-09-14 19:50:16 +05:30
// Tries to load and set an issue template. The first return value indicates if a template was loaded.
func setTemplateIfExists ( ctx * context . Context , ctxDataKey string , possibleFiles [ ] string ) ( bool , map [ string ] error ) {
2022-09-02 13:28:49 +05:30
commit , err := ctx . Repo . GitRepo . GetBranchCommit ( ctx . Repo . Repository . DefaultBranch )
2016-02-18 03:51:31 +05:30
if err != nil {
2023-09-14 19:50:16 +05:30
return false , nil
2016-02-18 03:51:31 +05:30
}
2022-09-02 13:28:49 +05:30
templateCandidates := make ( [ ] string , 0 , 1 + len ( possibleFiles ) )
if t := ctx . FormString ( "template" ) ; t != "" {
templateCandidates = append ( templateCandidates , t )
2020-09-11 20:18:39 +05:30
}
templateCandidates = append ( templateCandidates , possibleFiles ... ) // Append files to the end because they should be fallback
2022-09-02 13:28:49 +05:30
templateErrs := map [ string ] error { }
2020-09-11 20:18:39 +05:30
for _ , filename := range templateCandidates {
2022-09-02 13:28:49 +05:30
if ok , _ := commit . HasFile ( filename ) ; ! ok {
continue
}
template , err := issue_template . UnmarshalFromCommit ( commit , filename )
if err != nil {
templateErrs [ filename ] = err
continue
}
ctx . Data [ issueTemplateTitleKey ] = template . Title
ctx . Data [ ctxDataKey ] = template . Content
if template . Type ( ) == api . IssueTemplateTypeYaml {
2023-01-30 10:06:04 +05:30
// Replace field default values by values from query
for _ , field := range template . Fields {
fieldValue := ctx . FormString ( "field:" + field . ID )
if fieldValue != "" {
field . Attributes [ "value" ] = fieldValue
}
}
2022-09-02 13:28:49 +05:30
ctx . Data [ "Fields" ] = template . Fields
ctx . Data [ "TemplateFile" ] = template . FileName
}
labelIDs := make ( [ ] string , 0 , len ( template . Labels ) )
if repoLabels , err := issues_model . GetLabelsByRepoID ( ctx , ctx . Repo . Repository . ID , "" , db . ListOptions { } ) ; err == nil {
ctx . Data [ "Labels" ] = repoLabels
if ctx . Repo . Owner . IsOrganization ( ) {
if orgLabels , err := issues_model . GetLabelsByOrgID ( ctx , ctx . Repo . Owner . ID , ctx . FormString ( "sort" ) , db . ListOptions { } ) ; err == nil {
ctx . Data [ "OrgLabels" ] = orgLabels
repoLabels = append ( repoLabels , orgLabels ... )
2021-02-10 22:48:22 +05:30
}
2022-09-02 13:28:49 +05:30
}
2021-02-10 22:48:22 +05:30
2022-09-02 13:28:49 +05:30
for _ , metaLabel := range template . Labels {
for _ , repoLabel := range repoLabels {
if strings . EqualFold ( repoLabel . Name , metaLabel ) {
repoLabel . IsChecked = true
labelIDs = append ( labelIDs , strconv . FormatInt ( repoLabel . ID , 10 ) )
break
2020-09-11 20:18:39 +05:30
}
}
}
2022-11-22 18:28:49 +05:30
}
2023-01-19 03:20:22 +05:30
if template . Ref != "" && ! strings . HasPrefix ( template . Ref , "refs/" ) { // Assume that the ref intended is always a branch - for tags users should use refs/tags/<ref>
2022-11-22 18:28:49 +05:30
template . Ref = git . BranchPrefix + template . Ref
2016-02-18 03:51:31 +05:30
}
2022-09-02 13:28:49 +05:30
ctx . Data [ "HasSelectedLabel" ] = len ( labelIDs ) > 0
ctx . Data [ "label_ids" ] = strings . Join ( labelIDs , "," )
ctx . Data [ "Reference" ] = template . Ref
2023-05-26 06:34:48 +05:30
ctx . Data [ "RefEndName" ] = git . RefName ( template . Ref ) . ShortName ( )
2023-09-14 19:50:16 +05:30
return true , templateErrs
2016-02-18 03:51:31 +05:30
}
2023-09-14 19:50:16 +05:30
return false , templateErrs
2016-02-18 03:51:31 +05:30
}
2019-01-21 17:15:32 +05:30
// NewIssue render creating issue page
2016-03-11 22:26:52 +05:30
func NewIssue ( ctx * context . Context ) {
2023-08-31 21:06:25 +05:30
issueConfig , _ := issue_service . GetTemplateConfigFromDefaultBranch ( ctx . Repo . Repository , ctx . Repo . GitRepo )
hasTemplates := issue_service . HasTemplatesOrContactLinks ( ctx . Repo . Repository , ctx . Repo . GitRepo )
2015-08-09 12:53:02 +05:30
ctx . Data [ "Title" ] = ctx . Tr ( "repo.issues.new" )
ctx . Data [ "PageIsIssueList" ] = true
2023-08-31 21:06:25 +05:30
ctx . Data [ "NewIssueChooseTemplate" ] = hasTemplates
2018-08-14 00:34:39 +05:30
ctx . Data [ "PullRequestWorkInProgressPrefixes" ] = setting . Repository . PullRequest . WorkInProgressPrefixes
2021-08-11 06:01:13 +05:30
title := ctx . FormString ( "title" )
2020-09-11 20:18:39 +05:30
ctx . Data [ "TitleQuery" ] = title
2021-08-11 06:01:13 +05:30
body := ctx . FormString ( "body" )
2019-01-28 20:53:59 +05:30
ctx . Data [ "BodyQuery" ] = body
2021-03-18 07:32:38 +05:30
2022-06-30 21:25:08 +05:30
isProjectsEnabled := ctx . Repo . CanRead ( unit . TypeProjects )
ctx . Data [ "IsProjectsEnabled" ] = isProjectsEnabled
2020-10-05 11:19:33 +05:30
ctx . Data [ "IsAttachmentEnabled" ] = setting . Attachment . Enabled
upload . AddUploadContext ( ctx , "comment" )
2018-11-29 07:16:30 +05:30
2021-07-29 07:12:15 +05:30
milestoneID := ctx . FormInt64 ( "milestone" )
2019-06-10 19:46:02 +05:30
if milestoneID > 0 {
2022-04-08 14:41:15 +05:30
milestone , err := issues_model . GetMilestoneByRepoID ( ctx , ctx . Repo . Repository . ID , milestoneID )
2019-06-10 19:46:02 +05:30
if err != nil {
log . Error ( "GetMilestoneByID: %d: %v" , milestoneID , err )
} else {
ctx . Data [ "milestone_id" ] = milestoneID
ctx . Data [ "Milestone" ] = milestone
}
2018-11-29 07:16:30 +05:30
}
2021-07-29 07:12:15 +05:30
projectID := ctx . FormInt64 ( "project" )
2022-06-30 21:25:08 +05:30
if projectID > 0 && isProjectsEnabled {
2022-05-20 19:38:52 +05:30
project , err := project_model . GetProjectByID ( ctx , projectID )
2020-08-17 08:37:38 +05:30
if err != nil {
log . Error ( "GetProjectByID: %d: %v" , projectID , err )
} else if project . RepoID != ctx . Repo . Repository . ID {
log . Error ( "GetProjectByID: %d: %v" , projectID , fmt . Errorf ( "project[%d] not in repo [%d]" , project . ID , ctx . Repo . Repository . ID ) )
} else {
ctx . Data [ "project_id" ] = projectID
ctx . Data [ "Project" ] = project
}
2021-10-06 00:51:52 +05:30
if len ( ctx . Req . URL . Query ( ) . Get ( "project" ) ) > 0 {
ctx . Data [ "redirect_after_creation" ] = "project"
}
2020-08-17 08:37:38 +05:30
}
2020-01-19 12:13:38 +05:30
RetrieveRepoMetas ( ctx , ctx . Repo . Repository , false )
2022-09-02 13:28:49 +05:30
2023-07-21 16:50:04 +05:30
tags , err := repo_model . GetTagNamesByRepoID ( ctx , ctx . Repo . Repository . ID )
if err != nil {
ctx . ServerError ( "GetTagNamesByRepoID" , err )
return
}
ctx . Data [ "Tags" ] = tags
2023-05-09 05:00:14 +05:30
_ , templateErrs := issue_service . GetTemplatesFromDefaultBranch ( ctx . Repo . Repository , ctx . Repo . GitRepo )
2023-09-14 19:50:16 +05:30
templateLoaded , errs := setTemplateIfExists ( ctx , issueTemplateKey , IssueTemplateCandidates )
2023-10-06 12:19:37 +05:30
for k , v := range errs {
templateErrs [ k ] = v
2022-09-02 13:28:49 +05:30
}
2015-08-31 12:54:28 +05:30
if ctx . Written ( ) {
return
2015-08-10 16:27:57 +05:30
}
2015-08-09 12:53:02 +05:30
2022-09-02 13:28:49 +05:30
if len ( templateErrs ) > 0 {
ctx . Flash . Warning ( renderErrorOfTemplates ( ctx , templateErrs ) , true )
}
2021-11-10 01:27:58 +05:30
ctx . Data [ "HasIssuesOrPullsWritePermission" ] = ctx . Repo . CanWrite ( unit . TypeIssues )
2020-04-04 11:09:48 +05:30
2023-09-14 19:50:16 +05:30
if ! issueConfig . BlankIssuesEnabled && hasTemplates && ! templateLoaded {
// The "issues/new" and "issues/new/choose" share the same query parameters "project" and "milestone", if blank issues are disabled, just redirect to the "issues/choose" page with these parameters.
ctx . Redirect ( fmt . Sprintf ( "%s/issues/new/choose?%s" , ctx . Repo . Repository . Link ( ) , ctx . Req . URL . RawQuery ) , http . StatusSeeOther )
return
}
2021-04-05 21:00:52 +05:30
ctx . HTML ( http . StatusOK , tplIssueNew )
2014-07-26 11:58:04 +05:30
}
2024-03-02 20:35:07 +05:30
func renderErrorOfTemplates ( ctx * context . Context , errs map [ string ] error ) template . HTML {
2022-09-02 13:28:49 +05:30
var files [ ] string
for k := range errs {
files = append ( files , k )
}
sort . Strings ( files ) // keep the output stable
var lines [ ] string
for _ , file := range files {
lines = append ( lines , fmt . Sprintf ( "%s: %v" , file , errs [ file ] ) )
}
2024-03-02 20:35:07 +05:30
flashError , err := ctx . RenderToHTML ( tplAlertDetails , map [ string ] any {
2022-09-02 13:28:49 +05:30
"Message" : ctx . Tr ( "repo.issues.choose.ignore_invalid_templates" ) ,
"Summary" : ctx . Tr ( "repo.issues.choose.invalid_templates" , len ( errs ) ) ,
"Details" : utils . SanitizeFlashErrorString ( strings . Join ( lines , "\n" ) ) ,
} )
if err != nil {
log . Debug ( "render flash error: %v" , err )
2024-03-02 20:35:07 +05:30
flashError = ctx . Locale . Tr ( "repo.issues.choose.ignore_invalid_templates" )
2022-09-02 13:28:49 +05:30
}
return flashError
}
2020-09-11 20:18:39 +05:30
// NewIssueChooseTemplate render creating issue from template page
func NewIssueChooseTemplate ( ctx * context . Context ) {
ctx . Data [ "Title" ] = ctx . Tr ( "repo.issues.new" )
ctx . Data [ "PageIsIssueList" ] = true
2023-05-09 05:00:14 +05:30
issueTemplates , errs := issue_service . GetTemplatesFromDefaultBranch ( ctx . Repo . Repository , ctx . Repo . GitRepo )
2020-09-11 20:18:39 +05:30
ctx . Data [ "IssueTemplates" ] = issueTemplates
2022-09-02 13:28:49 +05:30
if len ( errs ) > 0 {
ctx . Flash . Warning ( renderErrorOfTemplates ( ctx , errs ) , true )
}
2023-05-09 05:00:14 +05:30
if ! issue_service . HasTemplatesOrContactLinks ( ctx . Repo . Repository , ctx . Repo . GitRepo ) {
2022-03-23 01:24:24 +05:30
// The "issues/new" and "issues/new/choose" share the same query parameters "project" and "milestone", if no template here, just redirect to the "issues/new" page with these parameters.
2023-02-11 12:04:11 +05:30
ctx . Redirect ( fmt . Sprintf ( "%s/issues/new?%s" , ctx . Repo . Repository . Link ( ) , ctx . Req . URL . RawQuery ) , http . StatusSeeOther )
2022-03-23 01:24:24 +05:30
return
}
2023-05-09 05:00:14 +05:30
issueConfig , err := issue_service . GetTemplateConfigFromDefaultBranch ( ctx . Repo . Repository , ctx . Repo . GitRepo )
2023-03-28 23:52:07 +05:30
ctx . Data [ "IssueConfig" ] = issueConfig
ctx . Data [ "IssueConfigError" ] = err // ctx.Flash.Err makes problems here
2022-03-23 01:24:24 +05:30
ctx . Data [ "milestone" ] = ctx . FormInt64 ( "milestone" )
ctx . Data [ "project" ] = ctx . FormInt64 ( "project" )
2021-04-05 21:00:52 +05:30
ctx . HTML ( http . StatusOK , tplIssueChoose )
2020-09-11 20:18:39 +05:30
}
2022-03-09 06:08:11 +05:30
// DeleteIssue deletes an issue
func DeleteIssue ( ctx * context . Context ) {
issue := GetActionIssue ( ctx )
if ctx . Written ( ) {
return
}
2023-04-14 23:48:28 +05:30
if err := issue_service . DeleteIssue ( ctx , ctx . Doer , ctx . Repo . GitRepo , issue ) ; err != nil {
2022-03-09 06:08:11 +05:30
ctx . ServerError ( "DeleteIssueByID" , err )
return
}
2022-06-19 15:35:15 +05:30
if issue . IsPull {
2023-02-11 12:04:11 +05:30
ctx . Redirect ( fmt . Sprintf ( "%s/pulls" , ctx . Repo . Repository . Link ( ) ) , http . StatusSeeOther )
2022-06-19 15:35:15 +05:30
return
}
2023-02-11 12:04:11 +05:30
ctx . Redirect ( fmt . Sprintf ( "%s/issues" , ctx . Repo . Repository . Link ( ) ) , http . StatusSeeOther )
2022-03-09 06:08:11 +05:30
}
2021-07-08 17:08:13 +05:30
// ValidateRepoMetas check and returns repository's meta information
2021-04-07 01:14:05 +05:30
func ValidateRepoMetas ( ctx * context . Context , form forms . CreateIssueForm , isPull bool ) ( [ ] int64 , [ ] int64 , int64 , int64 ) {
2015-09-02 04:37:02 +05:30
var (
repo = ctx . Repo . Repository
err error
)
2020-01-19 12:13:38 +05:30
labels := RetrieveRepoMetas ( ctx , ctx . Repo . Repository , isPull )
2015-09-02 04:37:02 +05:30
if ctx . Written ( ) {
2020-08-17 08:37:38 +05:30
return nil , nil , 0 , 0
2015-09-02 04:37:02 +05:30
}
2017-02-01 08:06:08 +05:30
var labelIDs [ ] int64
2015-09-02 04:37:02 +05:30
hasSelected := false
2017-02-01 08:06:08 +05:30
// Check labels.
if len ( form . LabelIDs ) > 0 {
labelIDs , err = base . StringsToInt64s ( strings . Split ( form . LabelIDs , "," ) )
if err != nil {
2020-08-17 08:37:38 +05:30
return nil , nil , 0 , 0
2017-02-01 08:06:08 +05:30
}
2022-10-12 10:48:26 +05:30
labelIDMark := make ( container . Set [ int64 ] )
labelIDMark . AddMultiple ( labelIDs ... )
2017-02-01 08:06:08 +05:30
for i := range labels {
2022-10-12 10:48:26 +05:30
if labelIDMark . Contains ( labels [ i ] . ID ) {
2017-02-01 08:06:08 +05:30
labels [ i ] . IsChecked = true
hasSelected = true
}
2015-09-02 04:37:02 +05:30
}
}
2017-02-01 08:06:08 +05:30
ctx . Data [ "Labels" ] = labels
2015-09-02 04:37:02 +05:30
ctx . Data [ "HasSelectedLabel" ] = hasSelected
ctx . Data [ "label_ids" ] = form . LabelIDs
// Check milestone.
milestoneID := form . MilestoneID
if milestoneID > 0 {
2022-04-08 14:41:15 +05:30
milestone , err := issues_model . GetMilestoneByRepoID ( ctx , ctx . Repo . Repository . ID , milestoneID )
2015-09-02 04:37:02 +05:30
if err != nil {
2018-01-11 03:04:17 +05:30
ctx . ServerError ( "GetMilestoneByID" , err )
2020-08-17 08:37:38 +05:30
return nil , nil , 0 , 0
2015-09-02 04:37:02 +05:30
}
2021-12-10 06:57:50 +05:30
if milestone . RepoID != repo . ID {
ctx . ServerError ( "GetMilestoneByID" , err )
return nil , nil , 0 , 0
}
ctx . Data [ "Milestone" ] = milestone
2015-09-02 04:37:02 +05:30
ctx . Data [ "milestone_id" ] = milestoneID
}
2020-08-17 08:37:38 +05:30
if form . ProjectID > 0 {
2022-05-20 19:38:52 +05:30
p , err := project_model . GetProjectByID ( ctx , form . ProjectID )
2020-08-17 08:37:38 +05:30
if err != nil {
ctx . ServerError ( "GetProjectByID" , err )
return nil , nil , 0 , 0
}
2023-01-20 17:12:33 +05:30
if p . RepoID != ctx . Repo . Repository . ID && p . OwnerID != ctx . Repo . Repository . OwnerID {
2020-08-17 08:37:38 +05:30
ctx . NotFound ( "" , nil )
return nil , nil , 0 , 0
}
ctx . Data [ "Project" ] = p
ctx . Data [ "project_id" ] = form . ProjectID
}
2018-05-09 21:59:04 +05:30
// Check assignees
var assigneeIDs [ ] int64
if len ( form . AssigneeIDs ) > 0 {
assigneeIDs , err = base . StringsToInt64s ( strings . Split ( form . AssigneeIDs , "," ) )
2015-09-02 04:37:02 +05:30
if err != nil {
2020-08-17 08:37:38 +05:30
return nil , nil , 0 , 0
2018-05-09 21:59:04 +05:30
}
2019-10-25 20:16:37 +05:30
// Check if the passed assignees actually exists and is assignable
2018-05-09 21:59:04 +05:30
for _ , aID := range assigneeIDs {
2022-12-03 08:18:26 +05:30
assignee , err := user_model . GetUserByID ( ctx , aID )
2018-11-28 16:56:14 +05:30
if err != nil {
ctx . ServerError ( "GetUserByID" , err )
2020-08-17 08:37:38 +05:30
return nil , nil , 0 , 0
2018-11-28 16:56:14 +05:30
}
2022-05-11 15:39:36 +05:30
valid , err := access_model . CanBeAssigned ( ctx , assignee , repo , isPull )
2018-05-09 21:59:04 +05:30
if err != nil {
2020-08-17 08:37:38 +05:30
ctx . ServerError ( "CanBeAssigned" , err )
return nil , nil , 0 , 0
2018-11-28 16:56:14 +05:30
}
2020-08-17 08:37:38 +05:30
2019-10-25 20:16:37 +05:30
if ! valid {
2022-06-13 15:07:59 +05:30
ctx . ServerError ( "canBeAssigned" , repo_model . ErrUserDoesNotHaveAccessToRepo { UserID : aID , RepoName : repo . Name } )
2020-08-17 08:37:38 +05:30
return nil , nil , 0 , 0
2018-05-09 21:59:04 +05:30
}
2015-09-02 04:37:02 +05:30
}
}
2018-05-09 21:59:04 +05:30
// Keep the old assignee id thingy for compatibility reasons
if form . AssigneeID > 0 {
assigneeIDs = append ( assigneeIDs , form . AssigneeID )
}
2020-08-17 08:37:38 +05:30
return labelIDs , assigneeIDs , milestoneID , form . ProjectID
2015-09-02 04:37:02 +05:30
}
2016-11-24 12:34:31 +05:30
// NewIssuePost response for creating new issue
2021-01-26 21:06:53 +05:30
func NewIssuePost ( ctx * context . Context ) {
2021-04-07 01:14:05 +05:30
form := web . GetForm ( ctx ) . ( * forms . CreateIssueForm )
2015-08-09 12:53:02 +05:30
ctx . Data [ "Title" ] = ctx . Tr ( "repo.issues.new" )
ctx . Data [ "PageIsIssueList" ] = true
2023-05-09 05:00:14 +05:30
ctx . Data [ "NewIssueChooseTemplate" ] = issue_service . HasTemplatesOrContactLinks ( ctx . Repo . Repository , ctx . Repo . GitRepo )
2018-08-14 00:34:39 +05:30
ctx . Data [ "PullRequestWorkInProgressPrefixes" ] = setting . Repository . PullRequest . WorkInProgressPrefixes
2020-10-05 11:19:33 +05:30
ctx . Data [ "IsAttachmentEnabled" ] = setting . Attachment . Enabled
upload . AddUploadContext ( ctx , "comment" )
2014-07-26 11:58:04 +05:30
2015-08-10 14:22:08 +05:30
var (
2015-08-10 16:27:57 +05:30
repo = ctx . Repo . Repository
2015-08-11 20:54:40 +05:30
attachments [ ] string
2015-08-10 14:22:08 +05:30
)
2015-08-31 12:54:28 +05:30
2021-01-26 21:06:53 +05:30
labelIDs , assigneeIDs , milestoneID , projectID := ValidateRepoMetas ( ctx , * form , false )
2015-09-02 04:37:02 +05:30
if ctx . Written ( ) {
return
2015-08-10 14:22:08 +05:30
}
2020-08-18 09:53:45 +05:30
if setting . Attachment . Enabled {
2016-08-11 18:18:08 +05:30
attachments = form . Files
2015-08-11 20:54:40 +05:30
}
2014-07-26 11:58:04 +05:30
if ctx . HasError ( ) {
2023-06-16 12:02:43 +05:30
ctx . JSONError ( ctx . GetErrMsg ( ) )
2014-07-26 11:58:04 +05:30
return
}
2019-01-21 17:15:32 +05:30
if util . IsEmptyString ( form . Title ) {
2023-06-16 12:02:43 +05:30
ctx . JSONError ( ctx . Tr ( "repo.issues.new.title_empty" ) )
2019-01-21 17:15:32 +05:30
return
}
2022-09-02 13:28:49 +05:30
content := form . Content
if filename := ctx . Req . Form . Get ( "template-file" ) ; filename != "" {
if template , err := issue_template . UnmarshalFromRepo ( ctx . Repo . GitRepo , ctx . Repo . Repository . DefaultBranch , filename ) ; err == nil {
content = issue_template . RenderToMarkdown ( template , ctx . Req . Form )
}
}
2022-06-13 15:07:59 +05:30
issue := & issues_model . Issue {
2016-03-14 08:50:22 +05:30
RepoID : repo . ID ,
2021-11-16 23:48:25 +05:30
Repo : repo ,
2016-08-14 16:02:24 +05:30
Title : form . Title ,
2022-03-22 12:33:22 +05:30
PosterID : ctx . Doer . ID ,
Poster : ctx . Doer ,
2015-08-10 16:27:57 +05:30
MilestoneID : milestoneID ,
2022-09-02 13:28:49 +05:30
Content : content ,
2017-08-24 18:00:27 +05:30
Ref : form . Ref ,
2014-07-26 11:58:04 +05:30
}
Add Organization Wide Labels (#10814)
* Add organization wide labels
Implement organization wide labels similar to organization wide
webhooks. This lets you create individual labels for organizations that can be used
for all repos under that organization (so being able to reuse the same
label across multiple repos).
This makes it possible for small organizations with many repos to use
labels effectively.
Fixes #7406
* Add migration
* remove comments
* fix tests
* Update options/locale/locale_en-US.ini
Removed unused translation string
* show org labels in issue search label filter
* Use more clear var name
* rename migration after merge from master
* comment typo
* update migration again after rebase with master
* check for orgID <=0 per guillep2k review
* fmt
* Apply suggestions from code review
Co-Authored-By: guillep2k <18600385+guillep2k@users.noreply.github.com>
* remove unused code
* Make sure RepoID is 0 when searching orgID per code review
* more changes/code review requests
* More descriptive translation var per code review
* func description/delete comment when issue label deleted instead of hiding it
* remove comment
* only use issues in that repo when calculating number of open issues for org label on repo label page
* Add integration test for IssuesSearch API with labels
* remove unused function
* Update models/issue_label.go
Co-Authored-By: guillep2k <18600385+guillep2k@users.noreply.github.com>
* Use subquery in GetLabelIDsInReposByNames
* Fix tests to use correct orgID
* fix more tests
* IssuesSearch api now uses new BuildLabelNamesIssueIDsCondition. Add a few more tests as well
* update comment for clarity
* Revert previous code change now that we can use the new BuildLabelNamesIssueIDsCondition
* Don't sort repos by date in IssuesSearch API
After much debugging I've found a strange issue where in some cases MySQL will return a different result than other enigines if a query is sorted by a null collumn. For example with our integration test data where we don't set updated_unix in repository fixtures:
SELECT `id`, `owner_id`, `owner_name`, `lower_name`, `name`, `description`, `website`, `original_service_type`, `original_url`, `default_branch`, `num_watches`, `num_stars`, `num_forks`, `num_issues`, `num_closed_issues`, `num_pulls`, `num_closed_pulls`, `num_milestones`, `num_closed_milestones`, `is_private`, `is_empty`, `is_archived`, `is_mirror`, `status`, `is_fork`, `fork_id`, `is_template`, `template_id`, `size`, `is_fsck_enabled`, `close_issues_via_commit_in_any_branch`, `topics`, `avatar`, `created_unix`, `updated_unix` FROM `repository` ORDER BY updated_unix DESC LIMIT 15 OFFSET 45
Returns different results for MySQL than other engines. However, the similar query:
SELECT `id`, `owner_id`, `owner_name`, `lower_name`, `name`, `description`, `website`, `original_service_type`, `original_url`, `default_branch`, `num_watches`, `num_stars`, `num_forks`, `num_issues`, `num_closed_issues`, `num_pulls`, `num_closed_pulls`, `num_milestones`, `num_closed_milestones`, `is_private`, `is_empty`, `is_archived`, `is_mirror`, `status`, `is_fork`, `fork_id`, `is_template`, `template_id`, `size`, `is_fsck_enabled`, `close_issues_via_commit_in_any_branch`, `topics`, `avatar`, `created_unix`, `updated_unix` FROM `repository` ORDER BY updated_unix DESC LIMIT 15 OFFSET 30
Returns the same results.
This causes integration tests to fail on MySQL in certain cases but would never show up in a real installation. Since this API call always returns issues based on the optionally provided repo_priority_id or the issueID itself, there is no change to results by changing the repo sorting method used to get ids earlier in the function.
* linter is back!
* code review
* remove now unused option
* Fix newline at end of files
* more unused code
* update to master
* check for matching ids before query
* Update models/issue_label.go
Co-Authored-By: 6543 <6543@obermui.de>
* Update models/issue_label.go
* update comments
* Update routers/org/setting.go
Co-authored-by: Lauris BH <lauris@nix.lv>
Co-authored-by: guillep2k <18600385+guillep2k@users.noreply.github.com>
Co-authored-by: 6543 <6543@obermui.de>
2020-04-01 09:44:46 +05:30
2023-04-14 23:48:28 +05:30
if err := issue_service . NewIssue ( ctx , repo , issue , labelIDs , attachments , assigneeIDs ) ; err != nil {
[MODERATION] User blocking
- Add the ability to block a user via their profile page.
- This will unstar their repositories and visa versa.
- Blocked users cannot create issues or pull requests on your the doer's repositories (mind that this is not the case for organizations).
- Blocked users cannot comment on the doer's opened issues or pull requests.
- Blocked users cannot add reactions to doer's comments.
- Blocked users cannot cause a notification trough mentioning the doer.
Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/540
(cherry picked from commit 687d852480388897db4d7b0cb397cf7135ab97b1)
(cherry picked from commit 0c32a4fde531018f74e01d9db6520895fcfa10cc)
(cherry picked from commit 1791130e3cb8470b9b39742e0004d5e4c7d1e64d)
(cherry picked from commit 37858b7e8fb6ba6c6ea0ac2562285b3b144efa19)
(cherry picked from commit a3e2bfd7e9eab82cc2c17061f6bb4e386a108c46)
(cherry picked from commit 7009b9fe87696b6182fab65ae82bf5a25cd39971)
Conflicts: https://codeberg.org/forgejo/forgejo/pulls/1014
routers/web/user/profile.go
templates/user/profile.tmpl
(cherry picked from commit b2aec3479177e725cfc7cbbb9d94753226928d1c)
(cherry picked from commit e2f1b73752f6bd3f830297d8f4ac438837471226)
[MODERATION] organization blocking a user (#802)
- Resolves #476
- Follow up for: #540
- Ensure that the doer and blocked person cannot follow each other.
- Ensure that the block person cannot watch doer's repositories.
- Add unblock button to the blocked user list.
- Add blocked since information to the blocked user list.
- Add extra testing to moderation code.
- Blocked user will unwatch doer's owned repository upon blocking.
- Add flash messages to let the user know the block/unblock action was successful.
- Add "You haven't blocked any users" message.
- Add organization blocking a user.
Co-authored-by: Gusted <postmaster@gusted.xyz>
Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/802
(cherry picked from commit 0505a1042197bd9136b58bc70ec7400a23471585)
(cherry picked from commit 37b4e6ef9b85e97d651cf350c9f3ea272ee8d76a)
(cherry picked from commit c17c121f2cf1f00e2a8d6fd6847705df47d0771e)
[MODERATION] organization blocking a user (#802) (squash)
Changes to adapt to:
6bbccdd177 Improve AJAX link and modal confirm dialog (#25210)
Refs: https://codeberg.org/forgejo/forgejo/pulls/882/files#issuecomment-945962
Refs: https://codeberg.org/forgejo/forgejo/pulls/882#issue-330561
(cherry picked from commit 523635f83cb2a1a4386769b79326088c5c4bbec7)
(cherry picked from commit 4743eaa6a0be0ef47de5b17c211dfe8bad1b7af9)
(cherry picked from commit eff5b43d2e843d5d537756d4fa58a8a010b6b527)
Conflicts: https://codeberg.org/forgejo/forgejo/pulls/1014
routers/web/user/profile.go
(cherry picked from commit 9d359be5ed11237088ccf6328571939af814984e)
(cherry picked from commit b1f3069a22a03734cffbfcd503ce004ba47561b7)
[MODERATION] add user blocking API
- Follow up for: #540, #802
- Add API routes for user blocking from user and organization
perspective.
- The new routes have integration testing.
- The new model functions have unit tests.
- Actually quite boring to write and to read this pull request.
(cherry picked from commit f3afaf15c7e34038363c9ce8e1ef957ec1e22b06)
(cherry picked from commit 6d754db3e5faff93a58fab2867737f81f40f6599)
(cherry picked from commit 2a89ddc0acffa9aea0f02b721934ef9e2b496a88)
(cherry picked from commit 4a147bff7e963ab9dffcfaefa5c2c01c59b4c732)
Conflicts:
routers/api/v1/api.go
templates/swagger/v1_json.tmpl
(cherry picked from commit bb8c33918569f65f25b014f0d7fe6ac20f9036fc)
(cherry picked from commit 5a11569a011b7d0a14391e2b5c07d0af825d7b0e)
(cherry picked from commit 2373c801ee6b84c368b498b16e6ad18650b38f42)
[MODERATION] restore redirect on unblock
ctx.RedirectToFirst(ctx.FormString("redirect_to"), ctx.ContextUser.HomeLink())
was replaced by
ctx.JSONOK()
in 128d77a3a Following up fixes for "Fix inconsistent user profile layout across tabs" (#25739)
thus changing the behavior (nicely spotted by the tests). This
restores it.
(cherry picked from commit 597c243707c3c86e7256faf1e6ba727224554de3)
(cherry picked from commit cfa539e590127b4b953b010fba3dea21c82a1714)
[MODERATION] Add test case (squash)
- Add a test case, to test a property of the function.
(cherry picked from commit 70dadb1916bfef8ba8cbc4e9b042cc8740f45e28)
[MODERATION] Block adding collaborators
- Ensure that the doer and blocked user cannot add each other as
collaborators to repositories.
- The Web UI gets a detailed message of the specific situation, the API
gets a generic Forbidden code.
- Unit tests has been added.
- Integration testing for Web and API has been added.
- This commit doesn't introduce removing each other as collaborators on
the block action, due to the complexity of database calls that needs to
be figured out. That deserves its own commit and test code.
(cherry picked from commit 747be949a1b3cd06f6586512f1af4630e55d7ad4)
[MODERATION] move locale_en-US.ini strings to avoid conflicts
Conflicts:
web_src/css/org.css
web_src/css/user.css
https://codeberg.org/forgejo/forgejo/pulls/1180
(cherry picked from commit e53f955c888ebaafc863a6e463da87f70f5605da)
Conflicts:
services/issue/comments.go
https://codeberg.org/forgejo/forgejo/pulls/1212
(cherry picked from commit b4a454b576eee0c7738b2f7df1acaf5bf7810d12)
Conflicts:
models/forgejo_migrations/migrate.go
options/locale/locale_en-US.ini
services/pull/pull.go
https://codeberg.org/forgejo/forgejo/pulls/1264
[MODERATION] Remove blocked user collaborations with doer
- When the doer blocks an user, who is also an collaborator on an
repository that the doer owns, remove that collaboration.
- Added unit tests.
- Refactor the unit test to be more organized.
(cherry picked from commit ec8701617830152680d69d50d64cb43cc2054a89)
(cherry picked from commit 313e6174d832501c57724ae7a6285194b7b81aab)
[MODERATION] QoL improvements (squash)
- Ensure that organisations cannot be blocked. It currently has no
effect, as all blocked operations cannot be executed from an
organisation standpoint.
- Refactored the API route to make use of the `UserAssignmentAPI`
middleware.
- Make more use of `t.Run` so that the test code is more clear about
which block of code belongs to which test case.
- Added more integration testing (to ensure the organisations cannot be
blocked and some authorization/permission checks).
(cherry picked from commit e9d638d0756ee20b6bf1eb999c988533a5066a68)
[MODERATION] s/{{avatar/{{ctx.AvatarUtils.Avatar/
(cherry picked from commit ce8b30be1327ab98df2ba061dd7e2a278b278c5b)
(cherry picked from commit f911dc402508b04cd5d5fb2f3332c2d640e4556e)
Conflicts:
options/locale/locale_en-US.ini
https://codeberg.org/forgejo/forgejo/pulls/1354
(cherry picked from commit c1b37b7fdaf06ee60da341dff76d703990c08082)
(cherry picked from commit 856a2e09036adf56d987c6eee364c431bc37fb2e)
[MODERATION] Show graceful error on comment creation
- When someone is blocked by the repository owner or issue poster and
try to comment on that issue, they get shown a graceful error.
- Adds integration test.
(cherry picked from commit 490646302e1e3dc3c59c9d75938b4647b6873ce7)
(cherry picked from commit d3d88667cbb928a6ff80658eba8ef0c6c508c9e0)
(cherry picked from commit 6818de13a921753e082b7c3d64c23917cc884e4b)
[MODERATION] Show graceful error on comment creation (squash) typo
(cherry picked from commit 1588d4834a37a744f092f2aeea6c9ef4795d7356)
(cherry picked from commit d510ea52d091503e841d66f2f604348add8b4535)
(cherry picked from commit 8249e93a14f628bb0e89fe3be678e4966539944e)
[MODERATION] Refactor integration testing (squash)
- Motivation for this PR is that I'd noticed that a lot of repeated
calls are happening between the test functions and that certain tests
weren't using helper functions like `GetCSRF`, therefor this refactor of
the integration tests to keep it: clean, small and hopefully more
maintainable and understandable.
- There are now three integration tests: `TestBlockUser`,
`TestBlockUserFromOrganization` and `TestBlockActions` (and has been
moved in that order in the source code).
- `TestBlockUser` is for doing blocking related actions as an user and
`TestBlockUserFromOrganization` as an organisation, even though they
execute the same kind of tests they do not share any database calls or
logic and therefor it currently doesn't make sense to merge them
together (hopefully such an opportunity might be presented in the future).
- `TestBlockActions` now contain all tests for actions that should be
blocked after blocking has happened, most tests now share the same doer
and blocked users and an extra fixture has been added to make this
possible for the comment test.
- Less code, more comments and more re-use between tests.
(cherry picked from commit ffb393213d2f1269aad3c019d039cf60d0fe4b10)
(cherry picked from commit 85505e0f815fede589c272d301c95204f9596985)
(cherry picked from commit 0f3cf17761f6caedb17550f69de96990c2090af1)
[MODERATION] Fix network error (squash)
- Fix network error toast messages on user actions such as follow and
unfollow. This happened because the javascript code now expects an JSON
to be returned, but this wasn't the case due to
cfa539e590127b4953b010fba3dea21c82a1714.
- The integration testing has been adjusted to instead test for the
returned flash cookie.
(cherry picked from commit 112bc25e548d317a4ee00f9efa9068794a733e3b)
(cherry picked from commit 1194fe4899eb39dcb9a2410032ad0cc67a62b92b)
(cherry picked from commit 9abb95a8441e227874fe156095349a3173cc5a81)
[MODERATION] Modernize frontend (squash)
- Unify blocked users list.
- Use the new flex list classes for blocked users list to avoid using
the CSS helper classes and thereby be consistent in the design.
- Fix the modal by using the new modal class.
- Remove the icon in the modal as looks too big in the new design.
- Fix avatar not displaying as it was passing the context where the user
should've been passed.
- Don't use italics for 'Blocked since' text.
- Use namelink template to display the user's name and homelink.
(cherry picked from commit ec935a16a319b14e819ead828d1d9875280d9259)
(cherry picked from commit 67f37c83461aa393c53a799918e9708cb9b89b30)
Conflicts:
models/user/follow.go
models/user/user_test.go
routers/api/v1/user/follower.go
routers/web/shared/user/header.go
routers/web/user/profile.go
templates/swagger/v1_json.tmpl
https://codeberg.org/forgejo/forgejo/pulls/1468
(cherry picked from commit 6a9626839c6342cd2767ea12757ee2f78eaf443b)
Conflicts:
tests/integration/api_nodeinfo_test.go
https://codeberg.org/forgejo/forgejo/pulls/1508#issuecomment-1242385
(cherry picked from commit 7378b251b481ed1e60e816caf8f649e8397ee5fc)
Conflicts:
models/fixtures/watch.yml
models/issues/reaction.go
models/issues/reaction_test.go
routers/api/v1/repo/issue_reaction.go
routers/web/repo/issue.go
services/issue/issue.go
https://codeberg.org/forgejo/forgejo/pulls/1547
(cherry picked from commit c2028930c101223820de0bbafc318e9394c347b8)
(cherry picked from commit d3f9134aeeef784586e8412e8dbba0a8fceb0cd4)
(cherry picked from commit 7afe154c5c40bcc65accdf51c9224b2f7627a684)
(cherry picked from commit 99ac7353eb1e834a77fe42aa89208791cc2364ff)
(cherry picked from commit a9cde00c5c25ea8c427967cb7ab57abb618e44cb)
Conflicts:
services/user/delete.go
https://codeberg.org/forgejo/forgejo/pulls/1736
(cherry picked from commit 008c0cc63d1a3b8eb694bffbf77a7b25c56afd57)
[DEADCODE] add exceptions
(cherry picked from commit 12ddd2b10e3309f6430b0af42855c6af832832ee)
[MODERATION] Remove deadcode (squash)
- Remove deadcode that's no longer used by Forgejo.
(cherry picked from commit 0faeab4fa9b0aa59f86760b24ecbc07815026c82)
[MODERATION] Add repo transfers to blocked functionality (squash)
- When someone gets blocked, remove all pending repository transfers
from the blocked user to the doer.
- Do not allow to start transferring repositories to the doer as blocked user.
- Added unit testing.
- Added integration testing.
(cherry picked from commit 8a3caac33013482ddbee2fa51510c6918ba54466)
(cherry picked from commit a92b4cfeb63b90eb2d90d0feb51cec62e0502d84)
(cherry picked from commit acaaaf07d999974dbe5f9c5e792621c597bfb542)
(cherry picked from commit 735818863c1793aa6f6983afedc4bd3b36026ca5)
(cherry picked from commit f50fa43b32160d0d88eca1dbdca09b5f575fb62b)
(cherry picked from commit e16683643388fb3c60ea478f1419a6af4f4aa283)
(cherry picked from commit 82a0e4a3814a66ce44be6a031bdf08484586c61b)
(cherry picked from commit ff233c19c4a5edcc2b99a6f41a2d19dbe8c08b3b)
(cherry picked from commit 8ad87d215f2b6adb978de77e53ba2bf7ea571430)
[MODERATION] Fix unblock action (squash)
- Pass the whole context instead of only giving pieces.
- This fixes CSRF not correctly being inserted into the unblock buttons.
(cherry picked from commit 2aa51922ba6a0ea2f8644277baa74fc8f34ab95a)
(cherry picked from commit 7ee8db0f018340bc97f125415503e3e5db5f5082)
(cherry picked from commit e4f8b999bcd3b68b3ef7f54f5b17c3ada0308121)
(cherry picked from commit 05aea60b1302bbd3ea574a9c6c34e1005a5d73bf)
(cherry picked from commit dc0d61b012cfaf2385f71e97cda5f220b58b9fa4)
(cherry picked from commit f53fa583de671ff60a0a1d0f3ab8c260e1ba4e1f)
(cherry picked from commit c65b89a58d11b32009c710c2f5e75f0cd3539395)
(cherry picked from commit 69e50b9969db3ab71cefaed520757876a9629a5c)
(cherry picked from commit ec127440b86cb5fcf51799d8bd76a9fd6b9cebcc)
[MODERATION] cope with shared fixtures
* There is one more issue in the fixtures and this breaks some tests
* The users in the shared fixtures were renamed for clarity and that
breaks some tests
(cherry picked from commit 707a4edbdf67d0eb168d7bb430cf85dd8cd63c52)
Conflicts:
modules/indexer/issues/indexer_test.go
https://codeberg.org/forgejo/forgejo/pulls/1508
(cherry picked from commit 82cc044366c749df80ffad44eed2988b8e64211e)
(cherry picked from commit 2776aec7e85850f1d7f01a090a72491550fb9d29)
(cherry picked from commit 1fbde36dc784b5b2cc6193f02ff0d436b0f2a629)
(cherry picked from commit 1293db3c4e5df218501f5add9f9d41101ffcb8aa)
(cherry picked from commit 6476802175bac3ef78dd8f24ff6bebc16f398a78)
(cherry picked from commit 5740f2fc830356acb7929a02fe304008b94a0ca5)
(cherry picked from commit afc12d7b6e9b773fa89718aa79cd95c0e0ce4406)
[MODERATION] Fix transfer confirmation (squash)
- Fix problem caused by the clearer confirmation for dangerous actions commit.
(cherry picked from commit 3488f4a9cb1f7f73103ae0017d644f13ca3ab798)
(cherry picked from commit ed7de91f6ace23a1459bc6552edf719d62c7c941)
(cherry picked from commit 2d97929b9b7b8d979eb12bf0994d3f169d41f7fd)
(cherry picked from commit 50d035a7b058b9c4486c38cd4be0b02a4e1bf4d9)
(cherry picked from commit 0a0c07d78a1dee3489b97ab359bb957e3f7fb94b)
(cherry picked from commit 85e55c4dbc2f513f3d5254dac20915e8c3c22886)
(cherry picked from commit d8282122ad6e8b497de35d1ed89e3093a2cd5ee2)
(cherry picked from commit 3f0b3b6cc582c3d672d371dd9fe1203a56cb88c0)
[MODERATION] Purge issues on user deletion (squash)
(cherry picked from commit 4f529d9596ffbfc4e754c28830ba028f6344dc5b)
(cherry picked from commit f0e3acadd321fcb99e8ea3e3ce1c69df25c4ca4d)
(cherry picked from commit 682c4effe69dc0d4ed304fa7ce6259d9ce573629)
(cherry picked from commit e43c2d84fd4b6fd31e2370cec1034262d12e5c34)
(cherry picked from commit 9c8e53ccc78053026e4f667889959c23c8d95934)
(cherry picked from commit a9eb7ac783b2c16ee3702a88203bf857cb4147fc)
[MODERATION] Purge issues on user deletion (squash) revert shared fixtures workarounds
(cherry picked from commit 7224653a40e32186892e89bfedd49edecf5b8f81)
(cherry picked from commit aa6e8672f9473a9100e7575051dec9eda37709a0)
(cherry picked from commit 58c7947e95648f50237ddcd46b6bd025b224a70f)
(cherry picked from commit f1aacb1851b232082febcd7870a40a56de3855a6)
(cherry picked from commit 0bf174af87f7de9a8d869304f709e2bf41f3dde9)
(cherry picked from commit f9706f4335df3b7688ed60853d917efa72fb464a)
[MODERATION] Prepare moderation for context locale changes (squash)
- Resolves https://codeberg.org/forgejo/forgejo/issues/1711
(cherry picked from commit 2e289baea943dcece88f02d110b03d344308a261)
(cherry picked from commit 97b16bc19ae680db62608d6020b00fe5ac451c60)
[MODERATION] User blocking (squash) do not use shared fixture
It conflicts with a fixtured added in the commit
Fix comment permissions (#28213) (#28216)
(cherry picked from commit ab40799dcab24e9f495d765268b791931da81684)
(cherry picked from commit 996c92cafdb5b33a6d2d05d94038e950d97eb7de)
(cherry picked from commit 259912e3a69071c5ad57871464d0b79f69a8e72c)
Conflicts:
options/locale/locale_en-US.ini
https://codeberg.org/forgejo/forgejo/pulls/1921
(cherry picked from commit 1e82abc032c18015b92c93a7617a5dd06d50bd2d)
(cherry picked from commit a176fee1607d571b25b345184f1c50d403029610)
(cherry picked from commit 0480b76dfeda968849e900da9454a3efd82590fa)
(cherry picked from commit 4bc06b7b3841c74e3d790b1ef635c2b382ca7123)
(cherry picked from commit 073094cf722a927a623408d66537c758d7d64e4c)
(cherry picked from commit ac6201c647a4d3a2cfb2b0303b851a8fe7a29444)
(cherry picked from commit 7e0812674da3fbd1e96bdda820962edad6826fbd)
(cherry picked from commit 068c741e5696957710b3d1c2e18c00be2ffaa278)
Conflicts:
models/repo_transfer.go
models/repo_transfer_test.go
routers/web/user/profile.go
https://codeberg.org/forgejo/forgejo/pulls/2298
2023-08-15 04:37:38 +05:30
if errors . Is ( err , user_model . ErrBlockedByUser ) {
ctx . RenderWithErr ( ctx . Tr ( "repo.issues.blocked_by_user" ) , tplIssueNew , form )
return
} else if repo_model . IsErrUserDoesNotHaveAccessToRepo ( err ) {
2021-04-05 21:00:52 +05:30
ctx . Error ( http . StatusBadRequest , "UserDoesNotHaveAccessToRepo" , err . Error ( ) )
2018-05-09 21:59:04 +05:30
return
}
2018-01-11 03:04:17 +05:30
ctx . ServerError ( "NewIssue" , err )
2014-07-26 11:58:04 +05:30
return
2015-08-10 21:01:59 +05:30
}
2020-08-17 08:37:38 +05:30
if projectID > 0 {
2022-06-30 21:25:08 +05:30
if ! ctx . Repo . CanRead ( unit . TypeProjects ) {
// User must also be able to see the project.
ctx . Error ( http . StatusBadRequest , "user hasn't permissions to read projects" )
return
}
2024-05-08 19:14:57 +05:30
if err := issues_model . IssueAssignOrRemoveProject ( ctx , issue , ctx . Doer , projectID , 0 ) ; err != nil {
ctx . ServerError ( "IssueAssignOrRemoveProject" , err )
2020-08-17 08:37:38 +05:30
return
}
}
2015-09-02 04:37:02 +05:30
log . Trace ( "Issue created: %d/%d" , repo . ID , issue . ID )
2023-04-08 13:47:50 +05:30
if ctx . FormString ( "redirect_after_creation" ) == "project" && projectID > 0 {
2023-06-16 12:02:43 +05:30
ctx . JSONRedirect ( ctx . Repo . RepoLink + "/projects/" + strconv . FormatInt ( projectID , 10 ) )
2021-10-06 00:51:52 +05:30
} else {
2023-06-16 12:02:43 +05:30
ctx . JSONRedirect ( issue . Link ( ) )
2021-10-06 00:51:52 +05:30
}
2014-07-26 11:58:04 +05:30
}
2023-08-24 10:36:17 +05:30
// roleDescriptor returns the role descriptor for a comment in/with the given repo, poster and issue
2023-02-15 22:59:13 +05:30
func roleDescriptor ( ctx stdCtx . Context , repo * repo_model . Repository , poster * user_model . User , issue * issues_model . Issue , hasOriginalAuthor bool ) ( issues_model . RoleDescriptor , error ) {
2023-08-24 10:36:17 +05:30
roleDescriptor := issues_model . RoleDescriptor { }
2023-02-15 22:59:13 +05:30
if hasOriginalAuthor {
2023-08-24 10:36:17 +05:30
return roleDescriptor , nil
2023-02-15 22:59:13 +05:30
}
2022-05-11 15:39:36 +05:30
perm , err := access_model . GetUserRepoPermission ( ctx , repo , poster )
2018-11-28 16:56:14 +05:30
if err != nil {
2023-08-24 10:36:17 +05:30
return roleDescriptor , err
2017-12-21 13:13:26 +05:30
}
2020-11-28 21:22:29 +05:30
2023-08-24 10:36:17 +05:30
// If the poster is the actual poster of the issue, enable Poster role.
roleDescriptor . IsPoster = issue . IsPoster ( poster . ID )
2020-11-28 21:22:29 +05:30
2021-11-11 11:59:30 +05:30
// Check if the poster is owner of the repo.
if perm . IsOwner ( ) {
2023-08-24 10:36:17 +05:30
// If the poster isn't an admin, enable the owner role.
2021-11-11 11:59:30 +05:30
if ! poster . IsAdmin {
2023-08-24 10:36:17 +05:30
roleDescriptor . RoleInRepo = issues_model . RoleRepoOwner
return roleDescriptor , nil
}
2020-11-28 21:22:29 +05:30
2023-08-24 10:36:17 +05:30
// Otherwise check if poster is the real repo admin.
2023-10-03 16:00:41 +05:30
ok , err := access_model . IsUserRealRepoAdmin ( ctx , repo , poster )
2023-08-24 10:36:17 +05:30
if err != nil {
return roleDescriptor , err
}
if ok {
roleDescriptor . RoleInRepo = issues_model . RoleRepoOwner
return roleDescriptor , nil
2020-11-28 21:22:29 +05:30
}
2021-11-11 11:59:30 +05:30
}
2020-11-28 21:22:29 +05:30
2023-08-24 10:36:17 +05:30
// If repo is organization, check Member role
if err := repo . LoadOwner ( ctx ) ; err != nil {
return roleDescriptor , err
}
if repo . Owner . IsOrganization ( ) {
if isMember , err := organization . IsOrganizationMember ( ctx , repo . Owner . ID , poster . ID ) ; err != nil {
return roleDescriptor , err
} else if isMember {
roleDescriptor . RoleInRepo = issues_model . RoleRepoMember
return roleDescriptor , nil
}
2020-11-28 21:22:29 +05:30
}
2023-08-24 10:36:17 +05:30
// If the poster is the collaborator of the repo
if isCollaborator , err := repo_model . IsCollaborator ( ctx , repo . ID , poster . ID ) ; err != nil {
return roleDescriptor , err
} else if isCollaborator {
roleDescriptor . RoleInRepo = issues_model . RoleRepoCollaborator
return roleDescriptor , nil
}
hasMergedPR , err := issues_model . HasMergedPullRequestInRepo ( ctx , repo . ID , poster . ID )
if err != nil {
return roleDescriptor , err
} else if hasMergedPR {
roleDescriptor . RoleInRepo = issues_model . RoleRepoContributor
2023-11-27 16:16:55 +05:30
} else if issue . IsPull {
2023-08-24 10:36:17 +05:30
// only display first time contributor in the first opening pull request
roleDescriptor . RoleInRepo = issues_model . RoleRepoFirstTimeContributor
2017-12-21 13:13:26 +05:30
}
2018-11-28 16:56:14 +05:30
2021-11-11 11:59:30 +05:30
return roleDescriptor , nil
2017-12-21 13:13:26 +05:30
}
2022-06-13 15:07:59 +05:30
func getBranchData ( ctx * context . Context , issue * issues_model . Issue ) {
2019-12-16 11:50:25 +05:30
ctx . Data [ "BaseBranch" ] = nil
ctx . Data [ "HeadBranch" ] = nil
ctx . Data [ "HeadUserName" ] = nil
ctx . Data [ "BaseName" ] = ctx . Repo . Repository . OwnerName
if issue . IsPull {
pull := issue . PullRequest
ctx . Data [ "BaseBranch" ] = pull . BaseBranch
ctx . Data [ "HeadBranch" ] = pull . HeadBranch
2022-11-19 13:42:33 +05:30
ctx . Data [ "HeadUserName" ] = pull . MustHeadUserName ( ctx )
2019-12-16 11:50:25 +05:30
}
}
2016-11-24 12:34:31 +05:30
// ViewIssue render issue view page
2016-03-11 22:26:52 +05:30
func ViewIssue ( ctx * context . Context ) {
2019-12-14 06:23:32 +05:30
if ctx . Params ( ":type" ) == "issues" {
// If issue was requested we check if repo has external tracker and redirect
2022-12-10 08:16:31 +05:30
extIssueUnit , err := ctx . Repo . Repository . GetUnit ( ctx , unit . TypeExternalTracker )
2019-12-14 06:23:32 +05:30
if err == nil && extIssueUnit != nil {
if extIssueUnit . ExternalTrackerConfig ( ) . ExternalTrackerStyle == markup . IssueNameStyleNumeric || extIssueUnit . ExternalTrackerConfig ( ) . ExternalTrackerStyle == "" {
2023-10-11 09:54:07 +05:30
metas := ctx . Repo . Repository . ComposeMetas ( ctx )
2019-12-14 06:23:32 +05:30
metas [ "index" ] = ctx . Params ( ":index" )
2022-04-01 14:17:50 +05:30
res , err := vars . Expand ( extIssueUnit . ExternalTrackerConfig ( ) . ExternalTrackerFormat , metas )
if err != nil {
log . Error ( "unable to expand template vars for issue url. issue: %s, err: %v" , metas [ "index" ] , err )
ctx . ServerError ( "Expand" , err )
return
}
ctx . Redirect ( res )
2019-12-14 06:23:32 +05:30
return
}
2021-12-10 06:57:50 +05:30
} else if err != nil && ! repo_model . IsErrUnitTypeNotExist ( err ) {
2019-12-14 06:23:32 +05:30
ctx . ServerError ( "GetUnit" , err )
2019-12-07 09:51:18 +05:30
return
}
}
2023-07-22 19:44:27 +05:30
issue , err := issues_model . GetIssueByIndex ( ctx , ctx . Repo . Repository . ID , ctx . ParamsInt64 ( ":index" ) )
2014-07-26 11:58:04 +05:30
if err != nil {
2022-06-13 15:07:59 +05:30
if issues_model . IsErrIssueNotExist ( err ) {
2018-01-11 03:04:17 +05:30
ctx . NotFound ( "GetIssueByIndex" , err )
2014-07-26 11:58:04 +05:30
} else {
2018-01-11 03:04:17 +05:30
ctx . ServerError ( "GetIssueByIndex" , err )
2014-07-26 11:58:04 +05:30
}
return
}
2021-11-16 23:48:25 +05:30
if issue . Repo == nil {
issue . Repo = ctx . Repo . Repository
}
2017-03-30 05:01:47 +05:30
2015-09-02 04:37:02 +05:30
// Make sure type and URL matches.
if ctx . Params ( ":type" ) == "issues" && issue . IsPull {
2021-11-16 23:48:25 +05:30
ctx . Redirect ( issue . Link ( ) )
2015-09-02 04:37:02 +05:30
return
} else if ctx . Params ( ":type" ) == "pulls" && ! issue . IsPull {
2021-11-16 23:48:25 +05:30
ctx . Redirect ( issue . Link ( ) )
2015-09-02 04:37:02 +05:30
return
}
2015-09-02 13:38:05 +05:30
if issue . IsPull {
2016-03-07 10:27:46 +05:30
MustAllowPulls ( ctx )
if ctx . Written ( ) {
return
}
ctx . Data [ "PageIsPullList" ] = true
2015-09-02 13:38:05 +05:30
ctx . Data [ "PageIsPullConversation" ] = true
} else {
2015-12-05 08:00:33 +05:30
MustEnableIssues ( ctx )
if ctx . Written ( ) {
return
}
2015-09-02 13:38:05 +05:30
ctx . Data [ "PageIsIssueList" ] = true
2023-05-09 05:00:14 +05:30
ctx . Data [ "NewIssueChooseTemplate" ] = issue_service . HasTemplatesOrContactLinks ( ctx . Repo . Repository , ctx . Repo . GitRepo )
2015-09-02 13:38:05 +05:30
}
2021-11-10 01:27:58 +05:30
if issue . IsPull && ! ctx . Repo . CanRead ( unit . TypeIssues ) {
2020-01-19 12:13:38 +05:30
ctx . Data [ "IssueType" ] = "pulls"
2021-11-10 01:27:58 +05:30
} else if ! issue . IsPull && ! ctx . Repo . CanRead ( unit . TypePullRequests ) {
2020-01-19 12:13:38 +05:30
ctx . Data [ "IssueType" ] = "issues"
} else {
ctx . Data [ "IssueType" ] = "all"
}
2021-11-10 01:27:58 +05:30
ctx . Data [ "IsProjectsEnabled" ] = ctx . Repo . CanRead ( unit . TypeProjects )
2020-10-05 11:19:33 +05:30
ctx . Data [ "IsAttachmentEnabled" ] = setting . Attachment . Enabled
upload . AddUploadContext ( ctx , "comment" )
2018-11-28 16:56:14 +05:30
2022-06-13 15:07:59 +05:30
if err = issue . LoadAttributes ( ctx ) ; err != nil {
2019-09-20 11:15:38 +05:30
ctx . ServerError ( "LoadAttributes" , err )
return
}
if err = filterXRefComments ( ctx , issue ) ; err != nil {
ctx . ServerError ( "filterXRefComments" , err )
2018-12-13 21:25:43 +05:30
return
}
2024-02-25 04:04:51 +05:30
ctx . Data [ "Title" ] = fmt . Sprintf ( "#%d - %s" , issue . Index , emoji . ReplaceAliases ( issue . Title ) )
2018-11-28 16:56:14 +05:30
2022-06-13 15:07:59 +05:30
iw := new ( issues_model . IssueWatch )
2022-03-22 12:33:22 +05:30
if ctx . Doer != nil {
iw . UserID = ctx . Doer . ID
2020-04-21 19:18:53 +05:30
iw . IssueID = issue . ID
2023-09-16 20:09:12 +05:30
iw . IsWatching , err = issues_model . CheckIssueWatch ( ctx , ctx . Doer , issue )
2018-11-28 16:56:14 +05:30
if err != nil {
2021-01-15 01:57:22 +05:30
ctx . ServerError ( "CheckIssueWatch" , err )
2018-11-28 16:56:14 +05:30
return
}
}
ctx . Data [ "IssueWatch" ] = iw
2021-04-20 03:55:08 +05:30
issue . RenderedContent , err = markdown . RenderString ( & markup . RenderContext {
2024-01-15 14:19:24 +05:30
Links : markup . Links {
Base : ctx . Repo . RepoLink ,
} ,
Metas : ctx . Repo . Repository . ComposeMetas ( ctx ) ,
GitRepo : ctx . Repo . GitRepo ,
Ctx : ctx ,
2021-04-20 03:55:08 +05:30
} , issue . Content )
if err != nil {
ctx . ServerError ( "RenderString" , err )
return
}
2014-07-26 11:58:04 +05:30
2015-08-14 22:12:43 +05:30
repo := ctx . Repo . Repository
2015-09-02 04:37:02 +05:30
// Get more information if it's a pull request.
if issue . IsPull {
2016-08-16 22:49:09 +05:30
if issue . PullRequest . HasMerged {
ctx . Data [ "DisableStatusChange" ] = issue . PullRequest . HasMerged
2015-09-02 18:56:56 +05:30
PrepareMergedViewPullInfo ( ctx , issue )
} else {
PrepareViewPullInfo ( ctx , issue )
2019-04-21 02:20:34 +05:30
ctx . Data [ "DisableStatusChange" ] = ctx . Data [ "IsPullRequestBroken" ] == true && issue . IsClosed
2015-09-02 18:56:56 +05:30
}
2015-09-02 13:38:05 +05:30
if ctx . Written ( ) {
2015-09-02 04:37:02 +05:30
return
}
}
2015-08-12 14:34:23 +05:30
// Metas.
2015-08-14 22:12:43 +05:30
// Check labels.
2022-10-12 10:48:26 +05:30
labelIDMark := make ( container . Set [ int64 ] )
for _ , label := range issue . Labels {
labelIDMark . Add ( label . ID )
2015-08-14 22:12:43 +05:30
}
2022-06-13 15:07:59 +05:30
labels , err := issues_model . GetLabelsByRepoID ( ctx , repo . ID , "" , db . ListOptions { } )
2015-08-14 22:12:43 +05:30
if err != nil {
2018-01-11 03:04:17 +05:30
ctx . ServerError ( "GetLabelsByRepoID" , err )
2015-08-14 22:12:43 +05:30
return
}
Add Organization Wide Labels (#10814)
* Add organization wide labels
Implement organization wide labels similar to organization wide
webhooks. This lets you create individual labels for organizations that can be used
for all repos under that organization (so being able to reuse the same
label across multiple repos).
This makes it possible for small organizations with many repos to use
labels effectively.
Fixes #7406
* Add migration
* remove comments
* fix tests
* Update options/locale/locale_en-US.ini
Removed unused translation string
* show org labels in issue search label filter
* Use more clear var name
* rename migration after merge from master
* comment typo
* update migration again after rebase with master
* check for orgID <=0 per guillep2k review
* fmt
* Apply suggestions from code review
Co-Authored-By: guillep2k <18600385+guillep2k@users.noreply.github.com>
* remove unused code
* Make sure RepoID is 0 when searching orgID per code review
* more changes/code review requests
* More descriptive translation var per code review
* func description/delete comment when issue label deleted instead of hiding it
* remove comment
* only use issues in that repo when calculating number of open issues for org label on repo label page
* Add integration test for IssuesSearch API with labels
* remove unused function
* Update models/issue_label.go
Co-Authored-By: guillep2k <18600385+guillep2k@users.noreply.github.com>
* Use subquery in GetLabelIDsInReposByNames
* Fix tests to use correct orgID
* fix more tests
* IssuesSearch api now uses new BuildLabelNamesIssueIDsCondition. Add a few more tests as well
* update comment for clarity
* Revert previous code change now that we can use the new BuildLabelNamesIssueIDsCondition
* Don't sort repos by date in IssuesSearch API
After much debugging I've found a strange issue where in some cases MySQL will return a different result than other engines if a query is sorted by a null column. For example with our integration test data where we don't set updated_unix in repository fixtures:
SELECT `id`, `owner_id`, `owner_name`, `lower_name`, `name`, `description`, `website`, `original_service_type`, `original_url`, `default_branch`, `num_watches`, `num_stars`, `num_forks`, `num_issues`, `num_closed_issues`, `num_pulls`, `num_closed_pulls`, `num_milestones`, `num_closed_milestones`, `is_private`, `is_empty`, `is_archived`, `is_mirror`, `status`, `is_fork`, `fork_id`, `is_template`, `template_id`, `size`, `is_fsck_enabled`, `close_issues_via_commit_in_any_branch`, `topics`, `avatar`, `created_unix`, `updated_unix` FROM `repository` ORDER BY updated_unix DESC LIMIT 15 OFFSET 45
Returns different results for MySQL than other engines. However, the similar query:
SELECT `id`, `owner_id`, `owner_name`, `lower_name`, `name`, `description`, `website`, `original_service_type`, `original_url`, `default_branch`, `num_watches`, `num_stars`, `num_forks`, `num_issues`, `num_closed_issues`, `num_pulls`, `num_closed_pulls`, `num_milestones`, `num_closed_milestones`, `is_private`, `is_empty`, `is_archived`, `is_mirror`, `status`, `is_fork`, `fork_id`, `is_template`, `template_id`, `size`, `is_fsck_enabled`, `close_issues_via_commit_in_any_branch`, `topics`, `avatar`, `created_unix`, `updated_unix` FROM `repository` ORDER BY updated_unix DESC LIMIT 15 OFFSET 30
Returns the same results.
This causes integration tests to fail on MySQL in certain cases but would never show up in a real installation. Since this API call always returns issues based on the optionally provided repo_priority_id or the issueID itself, there is no change to results by changing the repo sorting method used to get ids earlier in the function.
* linter is back!
* code review
* remove now unused option
* Fix newline at end of files
* more unused code
* update to master
* check for matching ids before query
* Update models/issue_label.go
Co-Authored-By: 6543 <6543@obermui.de>
* Update models/issue_label.go
* update comments
* Update routers/org/setting.go
Co-authored-by: Lauris BH <lauris@nix.lv>
Co-authored-by: guillep2k <18600385+guillep2k@users.noreply.github.com>
Co-authored-by: 6543 <6543@obermui.de>
2020-04-01 09:44:46 +05:30
ctx . Data [ "Labels" ] = labels
if repo . Owner . IsOrganization ( ) {
2022-06-13 15:07:59 +05:30
orgLabels , err := issues_model . GetLabelsByOrgID ( ctx , repo . Owner . ID , ctx . FormString ( "sort" ) , db . ListOptions { } )
Add Organization Wide Labels (#10814)
* Add organization wide labels
Implement organization wide labels similar to organization wide
webhooks. This lets you create individual labels for organizations that can be used
for all repos under that organization (so being able to reuse the same
label across multiple repos).
This makes it possible for small organizations with many repos to use
labels effectively.
Fixes #7406
* Add migration
* remove comments
* fix tests
* Update options/locale/locale_en-US.ini
Removed unused translation string
* show org labels in issue search label filter
* Use more clear var name
* rename migration after merge from master
* comment typo
* update migration again after rebase with master
* check for orgID <=0 per guillep2k review
* fmt
* Apply suggestions from code review
Co-Authored-By: guillep2k <18600385+guillep2k@users.noreply.github.com>
* remove unused code
* Make sure RepoID is 0 when searching orgID per code review
* more changes/code review requests
* More descriptive translation var per code review
* func description/delete comment when issue label deleted instead of hiding it
* remove comment
* only use issues in that repo when calculating number of open issues for org label on repo label page
* Add integration test for IssuesSearch API with labels
* remove unused function
* Update models/issue_label.go
Co-Authored-By: guillep2k <18600385+guillep2k@users.noreply.github.com>
* Use subquery in GetLabelIDsInReposByNames
* Fix tests to use correct orgID
* fix more tests
* IssuesSearch api now uses new BuildLabelNamesIssueIDsCondition. Add a few more tests as well
* update comment for clarity
* Revert previous code change now that we can use the new BuildLabelNamesIssueIDsCondition
* Don't sort repos by date in IssuesSearch API
After much debugging I've found a strange issue where in some cases MySQL will return a different result than other engines if a query is sorted by a null column. For example with our integration test data where we don't set updated_unix in repository fixtures:
SELECT `id`, `owner_id`, `owner_name`, `lower_name`, `name`, `description`, `website`, `original_service_type`, `original_url`, `default_branch`, `num_watches`, `num_stars`, `num_forks`, `num_issues`, `num_closed_issues`, `num_pulls`, `num_closed_pulls`, `num_milestones`, `num_closed_milestones`, `is_private`, `is_empty`, `is_archived`, `is_mirror`, `status`, `is_fork`, `fork_id`, `is_template`, `template_id`, `size`, `is_fsck_enabled`, `close_issues_via_commit_in_any_branch`, `topics`, `avatar`, `created_unix`, `updated_unix` FROM `repository` ORDER BY updated_unix DESC LIMIT 15 OFFSET 45
Returns different results for MySQL than other engines. However, the similar query:
SELECT `id`, `owner_id`, `owner_name`, `lower_name`, `name`, `description`, `website`, `original_service_type`, `original_url`, `default_branch`, `num_watches`, `num_stars`, `num_forks`, `num_issues`, `num_closed_issues`, `num_pulls`, `num_closed_pulls`, `num_milestones`, `num_closed_milestones`, `is_private`, `is_empty`, `is_archived`, `is_mirror`, `status`, `is_fork`, `fork_id`, `is_template`, `template_id`, `size`, `is_fsck_enabled`, `close_issues_via_commit_in_any_branch`, `topics`, `avatar`, `created_unix`, `updated_unix` FROM `repository` ORDER BY updated_unix DESC LIMIT 15 OFFSET 30
Returns the same results.
This causes integration tests to fail on MySQL in certain cases but would never show up in a real installation. Since this API call always returns issues based on the optionally provided repo_priority_id or the issueID itself, there is no change to results by changing the repo sorting method used to get ids earlier in the function.
* linter is back!
* code review
* remove now unused option
* Fix newline at end of files
* more unused code
* update to master
* check for matching ids before query
* Update models/issue_label.go
Co-Authored-By: 6543 <6543@obermui.de>
* Update models/issue_label.go
* update comments
* Update routers/org/setting.go
Co-authored-by: Lauris BH <lauris@nix.lv>
Co-authored-by: guillep2k <18600385+guillep2k@users.noreply.github.com>
Co-authored-by: 6543 <6543@obermui.de>
2020-04-01 09:44:46 +05:30
if err != nil {
ctx . ServerError ( "GetLabelsByOrgID" , err )
return
}
ctx . Data [ "OrgLabels" ] = orgLabels
labels = append ( labels , orgLabels ... )
}
2015-08-14 22:12:43 +05:30
hasSelected := false
for i := range labels {
2022-10-12 10:48:26 +05:30
if labelIDMark . Contains ( labels [ i ] . ID ) {
2015-08-14 22:12:43 +05:30
labels [ i ] . IsChecked = true
hasSelected = true
}
}
ctx . Data [ "HasSelectedLabel" ] = hasSelected
// Check milestone and assignee.
2018-11-28 16:56:14 +05:30
if ctx . Repo . CanWriteIssuesOrPulls ( issue . IsPull ) {
2015-09-02 04:37:02 +05:30
RetrieveRepoMilestonesAndAssignees ( ctx , repo )
2020-08-17 08:37:38 +05:30
retrieveProjects ( ctx , repo )
2015-09-02 04:37:02 +05:30
if ctx . Written ( ) {
2015-08-14 22:12:43 +05:30
return
}
}
2014-07-26 11:58:04 +05:30
2020-04-06 22:03:34 +05:30
if issue . IsPull {
2024-02-24 18:08:43 +05:30
canChooseReviewer := false
2022-09-09 22:57:47 +05:30
if ctx . Doer != nil && ctx . IsSigned {
2024-02-24 18:08:43 +05:30
canChooseReviewer = issue_service . CanDoerChangeReviewRequests ( ctx , ctx . Doer , repo , issue )
2020-04-06 22:03:34 +05:30
}
2020-10-13 01:25:13 +05:30
RetrieveRepoReviewers ( ctx , repo , issue , canChooseReviewer )
2020-04-06 22:03:34 +05:30
if ctx . Written ( ) {
return
}
}
2015-08-12 14:34:23 +05:30
if ctx . IsSigned {
2015-08-12 16:14:09 +05:30
// Update issue-user.
2022-08-25 08:01:57 +05:30
if err = activities_model . SetIssueReadBy ( ctx , issue . ID , ctx . Doer . ID ) ; err != nil {
2018-01-11 03:04:17 +05:30
ctx . ServerError ( "ReadBy" , err )
2015-08-12 16:14:09 +05:30
return
}
2015-08-12 14:34:23 +05:30
}
2015-08-14 00:13:40 +05:30
var (
Fix cannot reopen after pushing commits to a closed PR (#23189)
Close: #22784
1. On GH, we can reopen a PR which was closed before after pushing
commits. After reopening PR, we can see the commits that were pushed
after closing PR in the time line. So the case of
[issue](https://github.com/go-gitea/gitea/issues/22784) is a bug which
needs to be fixed.
2. After closing a PR and pushing commits, `headBranchSha` is not equal
to `sha`(which is the last commit ID string of reference). If the
judgement exists, the button of reopen will not display. So, skip the
judgement if the status of PR is closed.
![image](https://user-images.githubusercontent.com/33891828/222037529-651fccf9-0bba-433e-b2f0-79c17e0cc812.png)
3. Even if PR is already close, we should still insert comment record
into DB when we push commits.
So we should still call function `CreatePushPullComment()`.
https://github.com/go-gitea/gitea/blob/067b0c2664d127c552ccdfd264257caca4907a77/services/pull/pull.go#L260-L282
So, I add a switch(`includeClosed`) to the
`GetUnmergedPullRequestsByHeadInfo` func to control whether the status
of PR must be open. In this case, by setting `includeClosed` to `true`,
we can query the closed PR.
![image](https://user-images.githubusercontent.com/33891828/222621045-bb80987c-10c5-4eac-aa0c-1fb9c6aefb51.png)
4. In the loop of comments, I use the `latestCloseCommentID` variable to
record the last occurrence of the close comment.
In the go template, if the status of PR is closed, the comments whose
type is `CommentTypePullRequestPush(29)` after `latestCloseCommentID`
won't be rendered.
![image](https://user-images.githubusercontent.com/33891828/222058913-c91cf3e3-819b-40c5-8015-654b31eeccff.png)
e.g.
1). The initial status of the PR is opened.
![image](https://user-images.githubusercontent.com/33891828/222453617-33c5093e-f712-4cd6-8489-9f87e2075869.png)
2). Then I click the button of `Close`. PR is closed now.
![image](https://user-images.githubusercontent.com/33891828/222453694-25c588a9-c121-4897-9ae5-0b13cf33d20b.png)
3). I try to push a commit to this PR, even though its current status is
closed.
![image](https://user-images.githubusercontent.com/33891828/222453916-361678fb-7321-410d-9e37-5a26e8095638.png)
But in the comments list, this commit does not display. This is as expected :)
![image](https://user-images.githubusercontent.com/33891828/222454169-7617a791-78d2-404e-be5e-77d555f93313.png)
4). Click the `Reopen` button, the commit which is pushed after closing
PR display now.
![image](https://user-images.githubusercontent.com/33891828/222454533-897893b6-b96e-4701-b5cb-b1800f382b8f.png)
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-03-03 18:46:58 +05:30
role issues_model . RoleDescriptor
ok bool
marked = make ( map [ int64 ] issues_model . RoleDescriptor )
comment * issues_model . Comment
participants = make ( [ ] * user_model . User , 1 , 10 )
latestCloseCommentID int64
2015-08-14 00:13:40 +05:30
)
2022-12-10 08:16:31 +05:30
if ctx . Repo . Repository . IsTimetrackerEnabled ( ctx ) {
2017-09-12 12:18:13 +05:30
if ctx . IsSigned {
// Deal with the stopwatch
2023-09-16 20:09:12 +05:30
ctx . Data [ "IsStopwatchRunning" ] = issues_model . StopwatchExists ( ctx , ctx . Doer . ID , issue . ID )
2017-09-12 12:18:13 +05:30
if ! ctx . Data [ "IsStopwatchRunning" ] . ( bool ) {
var exists bool
2023-02-28 00:16:00 +05:30
var swIssue * issues_model . Issue
if exists , _ , swIssue , err = issues_model . HasUserStopwatch ( ctx , ctx . Doer . ID ) ; err != nil {
2018-01-11 03:04:17 +05:30
ctx . ServerError ( "HasUserStopwatch" , err )
2017-09-12 12:18:13 +05:30
return
}
ctx . Data [ "HasUserStopwatch" ] = exists
if exists {
// Add warning if the user has already a stopwatch
// Add link to the issue of the already running stopwatch
2023-02-28 00:16:00 +05:30
ctx . Data [ "OtherStopwatchURL" ] = swIssue . Link ( )
2017-09-12 12:18:13 +05:30
}
}
2023-10-14 14:07:24 +05:30
ctx . Data [ "CanUseTimetracker" ] = ctx . Repo . CanUseTimetracker ( ctx , issue , ctx . Doer )
2017-09-12 12:18:13 +05:30
} else {
ctx . Data [ "CanUseTimetracker" ] = false
}
2023-10-03 16:00:41 +05:30
if ctx . Data [ "WorkingUsers" ] , err = issues_model . TotalTimesForEachUser ( ctx , & issues_model . FindTrackedTimesOptions { IssueID : issue . ID } ) ; err != nil {
2023-09-29 18:34:14 +05:30
ctx . ServerError ( "TotalTimesForEachUser" , err )
2017-09-12 12:18:13 +05:30
return
}
}
2016-02-02 07:25:12 +05:30
2018-07-18 02:53:58 +05:30
// Check if the user can use the dependencies
2023-10-14 14:07:24 +05:30
ctx . Data [ "CanCreateIssueDependencies" ] = ctx . Repo . CanCreateIssueDependencies ( ctx , ctx . Doer , issue . IsPull )
2018-07-18 02:53:58 +05:30
Allow cross-repository dependencies on issues (#7901)
* in progress changes for #7405, added ability to add cross-repo dependencies
* removed unused repolink var
* fixed query that was breaking ci tests; fixed check in issue dependency add so that the id of the issue and dependency is checked rather than the indexes
* reverted removal of string in local files because these are done via crowdin, not updated manually
* removed 'Select("issue.*")' from getBlockedByDependencies and getBlockingDependencies based on comments in PR review
* changed getBlockedByDependencies and getBlockingDependencies to use a more xorm-like query, also updated the sidebar as a result
* simplified the getBlockingDependencies and getBlockedByDependencies methods; changed the sidebar to show the dependencies in a different format where you can see the name of the repository
* made some changes to the issue view in the dependencies (issue name on top, repo full name on separate line). Change view of issue in the dependency search results (also showing the full repo name on separate line)
* replace call to FindUserAccessibleRepoIDs with SearchRepositoryByName. The former was hardcoded to use isPrivate = false on the repo search, but this code needed it to be true. The SearchRepositoryByName method is used more in the code including on the user's dashboard
* some more tweaks to the layout of the issues when showing dependencies and in the search box when you add new dependencies
* added Name to the RepositoryMeta struct
* updated swagger doc
* fixed total count for link header on SearchIssues
* fixed indentation
* fixed alignment of remove icon on dependencies in issue sidebar
* removed unnecessary nil check (unnecessary because issue.loadRepo is called prior to this block)
* reverting .css change, somehow missed or forgot that less is used
* updated less file and generated css; updated sidebar template with styles to line up delete and issue index
* added ordering to the blocked by/depends on queries
* fixed sorting in issue dependency search and the depends on/blocks views to show issues from the current repo first, then by created date descending; added a "all cross repository dependencies" setting to allow this feature to be turned off, if turned off, the issue dependency search will work the way it did before (restricted to the current repository)
* re-applied my swagger changes after merge
* fixed split string condition in issue search
* changed ALLOW_CROSS_REPOSITORY_DEPENDENCIES description to sound more global than just the issue dependency search; returning 400 in the cross repo issue search api method if not enabled; fixed bug where the issue count did not respect the state parameter
* when adding a dependency to an issue, added a check to make sure the issue and dependency are in the same repo if cross repo dependencies is not enabled
* updated sortIssuesSession call in PullRequests, another commit moved this method from pull.go to pull_list.go so I had to re-apply my change here
* fixed incorrect setting of user id parameter in search repos call
2019-10-31 10:36:10 +05:30
// check if dependencies can be created across repositories
ctx . Data [ "AllowCrossRepositoryDependencies" ] = setting . Service . AllowCrossRepositoryDependencies
2023-02-15 22:59:13 +05:30
if issue . ShowRole , err = roleDescriptor ( ctx , repo , issue . Poster , issue , issue . HasOriginalAuthor ( ) ) ; err != nil {
2021-11-11 11:59:30 +05:30
ctx . ServerError ( "roleDescriptor" , err )
2020-09-10 23:39:14 +05:30
return
}
2021-11-11 11:59:30 +05:30
marked [ issue . PosterID ] = issue . ShowRole
2020-09-10 23:39:14 +05:30
2024-03-26 13:18:53 +05:30
// Render comments and fetch participants.
2016-02-02 07:25:12 +05:30
participants [ 0 ] = issue . Poster
2024-03-12 12:53:44 +05:30
if err := issue . Comments . LoadAttachmentsByIssue ( ctx ) ; err != nil {
ctx . ServerError ( "LoadAttachmentsByIssue" , err )
return
}
if err := issue . Comments . LoadPosters ( ctx ) ; err != nil {
ctx . ServerError ( "LoadPosters" , err )
return
}
2015-08-14 00:13:40 +05:30
for _ , comment = range issue . Comments {
2019-05-06 17:39:31 +05:30
comment . Issue = issue
2022-06-13 15:07:59 +05:30
if comment . Type == issues_model . CommentTypeComment || comment . Type == issues_model . CommentTypeReview {
2021-04-20 03:55:08 +05:30
comment . RenderedContent , err = markdown . RenderString ( & markup . RenderContext {
2024-01-15 14:19:24 +05:30
Links : markup . Links {
Base : ctx . Repo . RepoLink ,
} ,
Metas : ctx . Repo . Repository . ComposeMetas ( ctx ) ,
GitRepo : ctx . Repo . GitRepo ,
Ctx : ctx ,
2021-04-20 03:55:08 +05:30
} , comment . Content )
if err != nil {
ctx . ServerError ( "RenderString" , err )
return
}
2015-08-14 00:13:40 +05:30
// Check tag.
2021-11-11 11:59:30 +05:30
role , ok = marked [ comment . PosterID ]
2015-08-14 00:13:40 +05:30
if ok {
2021-11-11 11:59:30 +05:30
comment . ShowRole = role
2015-08-14 00:13:40 +05:30
continue
}
2023-02-15 22:59:13 +05:30
comment . ShowRole , err = roleDescriptor ( ctx , repo , comment . Poster , issue , comment . HasOriginalAuthor ( ) )
2017-12-21 13:13:26 +05:30
if err != nil {
2021-11-11 11:59:30 +05:30
ctx . ServerError ( "roleDescriptor" , err )
2017-12-21 13:13:26 +05:30
return
2015-08-14 00:13:40 +05:30
}
2021-11-11 11:59:30 +05:30
marked [ comment . PosterID ] = comment . ShowRole
2019-09-07 20:23:35 +05:30
participants = addParticipant ( comment . Poster , participants )
2022-06-13 15:07:59 +05:30
} else if comment . Type == issues_model . CommentTypeLabel {
2023-09-29 17:42:54 +05:30
if err = comment . LoadLabel ( ctx ) ; err != nil {
2018-01-11 03:04:17 +05:30
ctx . ServerError ( "LoadLabel" , err )
2017-01-30 18:16:45 +05:30
return
}
2022-06-13 15:07:59 +05:30
} else if comment . Type == issues_model . CommentTypeMilestone {
2022-11-19 13:42:33 +05:30
if err = comment . LoadMilestone ( ctx ) ; err != nil {
2018-01-11 03:04:17 +05:30
ctx . ServerError ( "LoadMilestone" , err )
2017-02-01 08:06:08 +05:30
return
}
2022-04-08 14:41:15 +05:30
ghostMilestone := & issues_model . Milestone {
2017-06-17 10:21:28 +05:30
ID : - 1 ,
2024-02-15 03:18:45 +05:30
Name : ctx . Locale . TrString ( "repo.issues.deleted_milestone" ) ,
2017-06-17 10:21:28 +05:30
}
if comment . OldMilestoneID > 0 && comment . OldMilestone == nil {
comment . OldMilestone = ghostMilestone
}
if comment . MilestoneID > 0 && comment . Milestone == nil {
comment . Milestone = ghostMilestone
}
2022-06-13 15:07:59 +05:30
} else if comment . Type == issues_model . CommentTypeProject {
2023-09-29 17:42:54 +05:30
if err = comment . LoadProject ( ctx ) ; err != nil {
2020-08-17 08:37:38 +05:30
ctx . ServerError ( "LoadProject" , err )
return
}
2022-03-29 19:46:31 +05:30
ghostProject := & project_model . Project {
2020-08-17 08:37:38 +05:30
ID : - 1 ,
2024-02-15 03:18:45 +05:30
Title : ctx . Locale . TrString ( "repo.issues.deleted_project" ) ,
2020-08-17 08:37:38 +05:30
}
if comment . OldProjectID > 0 && comment . OldProject == nil {
comment . OldProject = ghostProject
}
if comment . ProjectID > 0 && comment . Project == nil {
comment . Project = ghostProject
}
2022-06-13 15:07:59 +05:30
} else if comment . Type == issues_model . CommentTypeAssignees || comment . Type == issues_model . CommentTypeReviewRequest {
2023-09-29 17:42:54 +05:30
if err = comment . LoadAssigneeUserAndTeam ( ctx ) ; err != nil {
2020-10-13 01:25:13 +05:30
ctx . ServerError ( "LoadAssigneeUserAndTeam" , err )
2017-02-03 20:39:10 +05:30
return
}
2022-06-13 15:07:59 +05:30
} else if comment . Type == issues_model . CommentTypeRemoveDependency || comment . Type == issues_model . CommentTypeAddDependency {
2023-09-29 17:42:54 +05:30
if err = comment . LoadDepIssueDetails ( ctx ) ; err != nil {
2022-06-13 15:07:59 +05:30
if ! issues_model . IsErrIssueNotExist ( err ) {
2020-09-04 07:06:56 +05:30
ctx . ServerError ( "LoadDepIssueDetails" , err )
return
}
2018-07-18 02:53:58 +05:30
}
2023-04-20 12:09:44 +05:30
} else if comment . Type . HasContentSupport ( ) {
2021-04-20 03:55:08 +05:30
comment . RenderedContent , err = markdown . RenderString ( & markup . RenderContext {
2024-01-15 14:19:24 +05:30
Links : markup . Links {
Base : ctx . Repo . RepoLink ,
} ,
Metas : ctx . Repo . Repository . ComposeMetas ( ctx ) ,
GitRepo : ctx . Repo . GitRepo ,
Ctx : ctx ,
2021-04-20 03:55:08 +05:30
} , comment . Content )
if err != nil {
ctx . ServerError ( "RenderString" , err )
return
}
2023-09-29 17:42:54 +05:30
if err = comment . LoadReview ( ctx ) ; err != nil && ! issues_model . IsErrReviewNotExist ( err ) {
2018-08-06 10:13:22 +05:30
ctx . ServerError ( "LoadReview" , err )
return
}
2019-09-07 20:23:35 +05:30
participants = addParticipant ( comment . Poster , participants )
2018-08-06 10:13:22 +05:30
if comment . Review == nil {
continue
}
2022-01-20 04:56:57 +05:30
if err = comment . Review . LoadAttributes ( ctx ) ; err != nil {
2021-11-24 15:19:20 +05:30
if ! user_model . IsErrUserNotExist ( err ) {
2019-05-06 17:39:31 +05:30
ctx . ServerError ( "Review.LoadAttributes" , err )
return
}
2021-11-24 15:19:20 +05:30
comment . Review . Reviewer = user_model . NewGhostUser ( )
2018-08-06 10:13:22 +05:30
}
2022-01-20 04:56:57 +05:30
if err = comment . Review . LoadCodeComments ( ctx ) ; err != nil {
2018-08-06 10:13:22 +05:30
ctx . ServerError ( "Review.LoadCodeComments" , err )
return
}
2021-01-17 22:59:10 +05:30
for _ , codeComments := range comment . Review . CodeComments {
for _ , lineComments := range codeComments {
for _ , c := range lineComments {
// Check tag.
2021-11-11 11:59:30 +05:30
role , ok = marked [ c . PosterID ]
2021-01-17 22:59:10 +05:30
if ok {
2021-11-11 11:59:30 +05:30
c . ShowRole = role
2021-01-17 22:59:10 +05:30
continue
}
2020-04-18 19:20:25 +05:30
2023-02-15 22:59:13 +05:30
c . ShowRole , err = roleDescriptor ( ctx , repo , c . Poster , issue , c . HasOriginalAuthor ( ) )
2021-01-17 22:59:10 +05:30
if err != nil {
2021-11-11 11:59:30 +05:30
ctx . ServerError ( "roleDescriptor" , err )
2021-01-17 22:59:10 +05:30
return
}
2021-11-11 11:59:30 +05:30
marked [ c . PosterID ] = c . ShowRole
2021-01-17 22:59:10 +05:30
participants = addParticipant ( c . Poster , participants )
}
}
}
2023-09-29 17:42:54 +05:30
if err = comment . LoadResolveDoer ( ctx ) ; err != nil {
2020-04-18 19:20:25 +05:30
ctx . ServerError ( "LoadResolveDoer" , err )
return
}
2022-06-13 15:07:59 +05:30
} else if comment . Type == issues_model . CommentTypePullRequestPush {
2020-05-20 18:17:24 +05:30
participants = addParticipant ( comment . Poster , participants )
2022-01-20 04:56:57 +05:30
if err = comment . LoadPushCommits ( ctx ) ; err != nil {
2020-05-20 18:17:24 +05:30
ctx . ServerError ( "LoadPushCommits" , err )
return
}
2022-06-13 15:07:59 +05:30
} else if comment . Type == issues_model . CommentTypeAddTimeManual ||
2023-06-23 17:42:39 +05:30
comment . Type == issues_model . CommentTypeStopTracking ||
comment . Type == issues_model . CommentTypeDeleteTimeManual {
2021-02-19 16:22:11 +05:30
// drop error since times could be pruned from DB..
2023-10-03 16:00:41 +05:30
_ = comment . LoadTime ( ctx )
2023-06-23 17:42:39 +05:30
if comment . Content != "" {
2024-05-09 19:19:37 +05:30
// Content before v1.21 did store the formatted string instead of seconds,
// so "|" is used as delimiter to mark the new format
2023-06-23 17:42:39 +05:30
if comment . Content [ 0 ] != '|' {
// handle old time comments that have formatted text stored
2024-03-01 15:46:19 +05:30
comment . RenderedContent = templates . SanitizeHTML ( comment . Content )
2023-06-23 17:42:39 +05:30
comment . Content = ""
} else {
// else it's just a duration in seconds to pass on to the frontend
comment . Content = comment . Content [ 1 : ]
}
}
2023-04-21 22:06:37 +05:30
}
if comment . Type == issues_model . CommentTypeClose || comment . Type == issues_model . CommentTypeMergePull {
// record ID of the latest closed/merged comment.
Fix cannot reopen after pushing commits to a closed PR (#23189)
Close: #22784
1. On GH, we can reopen a PR which was closed before after pushing
commits. After reopening PR, we can see the commits that were pushed
after closing PR in the time line. So the case of
[issue](https://github.com/go-gitea/gitea/issues/22784) is a bug which
needs to be fixed.
2. After closing a PR and pushing commits, `headBranchSha` is not equal
to `sha`(which is the last commit ID string of reference). If the
judgement exists, the button of reopen will not display. So, skip the
judgement if the status of PR is closed.
![image](https://user-images.githubusercontent.com/33891828/222037529-651fccf9-0bba-433e-b2f0-79c17e0cc812.png)
3. Even if PR is already close, we should still insert comment record
into DB when we push commits.
So we should still call function `CreatePushPullComment()`.
https://github.com/go-gitea/gitea/blob/067b0c2664d127c552ccdfd264257caca4907a77/services/pull/pull.go#L260-L282
So, I add a switch(`includeClosed`) to the
`GetUnmergedPullRequestsByHeadInfo` func to control whether the status
of PR must be open. In this case, by setting `includeClosed` to `true`,
we can query the closed PR.
![image](https://user-images.githubusercontent.com/33891828/222621045-bb80987c-10c5-4eac-aa0c-1fb9c6aefb51.png)
4. In the loop of comments, I use the `latestCloseCommentID` variable to
record the last occurrence of the close comment.
In the go template, if the status of PR is closed, the comments whose
type is `CommentTypePullRequestPush(29)` after `latestCloseCommentID`
won't be rendered.
![image](https://user-images.githubusercontent.com/33891828/222058913-c91cf3e3-819b-40c5-8015-654b31eeccff.png)
e.g.
1). The initial status of the PR is opened.
![image](https://user-images.githubusercontent.com/33891828/222453617-33c5093e-f712-4cd6-8489-9f87e2075869.png)
2). Then I click the button of `Close`. PR is closed now.
![image](https://user-images.githubusercontent.com/33891828/222453694-25c588a9-c121-4897-9ae5-0b13cf33d20b.png)
3). I try to push a commit to this PR, even though its current status is
closed.
![image](https://user-images.githubusercontent.com/33891828/222453916-361678fb-7321-410d-9e37-5a26e8095638.png)
But in the comments list, this commit does not display. This is as expected :)
![image](https://user-images.githubusercontent.com/33891828/222454169-7617a791-78d2-404e-be5e-77d555f93313.png)
4). Click the `Reopen` button, the commit which is pushed after closing
PR display now.
![image](https://user-images.githubusercontent.com/33891828/222454533-897893b6-b96e-4701-b5cb-b1800f382b8f.png)
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-03-03 18:46:58 +05:30
// if PR is closed, the comments whose type is CommentTypePullRequestPush(29) after latestCloseCommentID won't be rendered.
latestCloseCommentID = comment . ID
2015-08-13 13:37:11 +05:30
}
}
2014-07-26 11:58:04 +05:30
Fix cannot reopen after pushing commits to a closed PR (#23189)
Close: #22784
1. On GH, we can reopen a PR which was closed before after pushing
commits. After reopening PR, we can see the commits that were pushed
after closing PR in the time line. So the case of
[issue](https://github.com/go-gitea/gitea/issues/22784) is a bug which
needs to be fixed.
2. After closing a PR and pushing commits, `headBranchSha` is not equal
to `sha`(which is the last commit ID string of reference). If the
judgement exists, the button of reopen will not display. So, skip the
judgement if the status of PR is closed.
![image](https://user-images.githubusercontent.com/33891828/222037529-651fccf9-0bba-433e-b2f0-79c17e0cc812.png)
3. Even if PR is already close, we should still insert comment record
into DB when we push commits.
So we should still call function `CreatePushPullComment()`.
https://github.com/go-gitea/gitea/blob/067b0c2664d127c552ccdfd264257caca4907a77/services/pull/pull.go#L260-L282
So, I add a switch(`includeClosed`) to the
`GetUnmergedPullRequestsByHeadInfo` func to control whether the status
of PR must be open. In this case, by setting `includeClosed` to `true`,
we can query the closed PR.
![image](https://user-images.githubusercontent.com/33891828/222621045-bb80987c-10c5-4eac-aa0c-1fb9c6aefb51.png)
4. In the loop of comments, I use the `latestCloseCommentID` variable to
record the last occurrence of the close comment.
In the go template, if the status of PR is closed, the comments whose
type is `CommentTypePullRequestPush(29)` after `latestCloseCommentID`
won't be rendered.
![image](https://user-images.githubusercontent.com/33891828/222058913-c91cf3e3-819b-40c5-8015-654b31eeccff.png)
e.g.
1). The initial status of the PR is opened.
![image](https://user-images.githubusercontent.com/33891828/222453617-33c5093e-f712-4cd6-8489-9f87e2075869.png)
2). Then I click the button of `Close`. PR is closed now.
![image](https://user-images.githubusercontent.com/33891828/222453694-25c588a9-c121-4897-9ae5-0b13cf33d20b.png)
3). I try to push a commit to this PR, even though its current status is
closed.
![image](https://user-images.githubusercontent.com/33891828/222453916-361678fb-7321-410d-9e37-5a26e8095638.png)
But in the comments list, this commit does not display. This is as expected :)
![image](https://user-images.githubusercontent.com/33891828/222454169-7617a791-78d2-404e-be5e-77d555f93313.png)
4). Click the `Reopen` button, the commit which is pushed after closing
PR display now.
![image](https://user-images.githubusercontent.com/33891828/222454533-897893b6-b96e-4701-b5cb-b1800f382b8f.png)
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-03-03 18:46:58 +05:30
ctx . Data [ "LatestCloseCommentID" ] = latestCloseCommentID
2020-10-26 03:19:48 +05:30
// Combine multiple label assignments into a single comment
combineLabelComments ( issue )
2019-12-16 11:50:25 +05:30
getBranchData ( ctx , issue )
2016-12-25 20:57:25 +05:30
if issue . IsPull {
pull := issue . PullRequest
2018-12-12 05:19:33 +05:30
pull . Issue = issue
2016-12-25 21:49:25 +05:30
canDelete := false
2023-10-30 08:43:06 +05:30
allowMerge := false
2016-12-25 21:49:25 +05:30
2017-06-21 06:30:03 +05:30
if ctx . IsSigned {
2022-11-19 13:42:33 +05:30
if err := pull . LoadHeadRepo ( ctx ) ; err != nil {
2020-03-03 04:01:55 +05:30
log . Error ( "LoadHeadRepo: %v" , err )
2022-04-28 21:15:33 +05:30
} else if pull . HeadRepo != nil {
2022-05-11 15:39:36 +05:30
perm , err := access_model . GetUserRepoPermission ( ctx , pull . HeadRepo , ctx . Doer )
2018-11-28 16:56:14 +05:30
if err != nil {
ctx . ServerError ( "GetUserRepoPermission" , err )
return
}
2021-11-10 01:27:58 +05:30
if perm . CanWrite ( unit . TypeCode ) {
2018-11-28 16:56:14 +05:30
// Check if branch is not protected
2022-04-28 21:15:33 +05:30
if pull . HeadBranch != pull . HeadRepo . DefaultBranch {
2023-01-16 13:30:22 +05:30
if protected , err := git_model . IsBranchProtected ( ctx , pull . HeadRepo . ID , pull . HeadBranch ) ; err != nil {
2022-04-28 21:15:33 +05:30
log . Error ( "IsProtectedBranch: %v" , err )
} else if ! protected {
canDelete = true
ctx . Data [ "DeleteBranchLink" ] = issue . Link ( ) + "/cleanup"
}
2018-11-28 16:56:14 +05:30
}
2022-04-28 21:15:33 +05:30
ctx . Data [ "CanWriteToHeadRepo" ] = true
2017-06-21 06:30:03 +05:30
}
2016-12-25 21:49:25 +05:30
}
2020-01-11 12:59:34 +05:30
2022-11-19 13:42:33 +05:30
if err := pull . LoadBaseRepo ( ctx ) ; err != nil {
2020-03-03 04:01:55 +05:30
log . Error ( "LoadBaseRepo: %v" , err )
2020-01-11 12:59:34 +05:30
}
2022-05-11 15:39:36 +05:30
perm , err := access_model . GetUserRepoPermission ( ctx , pull . BaseRepo , ctx . Doer )
2020-01-11 12:59:34 +05:30
if err != nil {
ctx . ServerError ( "GetUserRepoPermission" , err )
return
}
2023-10-30 08:43:06 +05:30
allowMerge , err = pull_service . IsUserAllowedToMerge ( ctx , pull , perm , ctx . Doer )
2020-01-11 12:59:34 +05:30
if err != nil {
ctx . ServerError ( "IsUserAllowedToMerge" , err )
return
}
2020-04-18 19:20:25 +05:30
2023-09-29 17:42:54 +05:30
if ctx . Data [ "CanMarkConversation" ] , err = issues_model . CanMarkConversation ( ctx , issue , ctx . Doer ) ; err != nil {
2020-04-18 19:20:25 +05:30
ctx . ServerError ( "CanMarkConversation" , err )
return
}
2016-12-25 21:49:25 +05:30
}
2016-12-25 20:57:25 +05:30
2023-10-30 08:43:06 +05:30
ctx . Data [ "AllowMerge" ] = allowMerge
2022-12-10 08:16:31 +05:30
prUnit , err := repo . GetUnit ( ctx , unit . TypePullRequests )
2018-01-06 00:26:50 +05:30
if err != nil {
2018-01-11 03:04:17 +05:30
ctx . ServerError ( "GetUnit" , err )
2018-01-06 00:26:50 +05:30
return
}
prConfig := prUnit . PullRequestsConfig ( )
2022-05-08 18:02:45 +05:30
var mergeStyle repo_model . MergeStyle
2018-01-06 00:26:50 +05:30
// Check correct values and select default
2021-12-10 06:57:50 +05:30
if ms , ok := ctx . Data [ "MergeStyle" ] . ( repo_model . MergeStyle ) ; ! ok ||
2018-01-06 00:26:50 +05:30
! prConfig . IsMergeStyleAllowed ( ms ) {
2021-03-27 20:25:40 +05:30
defaultMergeStyle := prConfig . GetDefaultMergeStyle ( )
if prConfig . IsMergeStyleAllowed ( defaultMergeStyle ) && ! ok {
2022-05-08 18:02:45 +05:30
mergeStyle = defaultMergeStyle
2021-03-27 20:25:40 +05:30
} else if prConfig . AllowMerge {
2022-05-08 18:02:45 +05:30
mergeStyle = repo_model . MergeStyleMerge
2018-01-06 00:26:50 +05:30
} else if prConfig . AllowRebase {
2022-05-08 18:02:45 +05:30
mergeStyle = repo_model . MergeStyleRebase
2018-12-27 15:57:08 +05:30
} else if prConfig . AllowRebaseMerge {
2022-05-08 18:02:45 +05:30
mergeStyle = repo_model . MergeStyleRebaseMerge
2018-01-06 00:26:50 +05:30
} else if prConfig . AllowSquash {
2022-05-08 18:02:45 +05:30
mergeStyle = repo_model . MergeStyleSquash
2024-02-13 04:07:23 +05:30
} else if prConfig . AllowFastForwardOnly {
mergeStyle = repo_model . MergeStyleFastForwardOnly
2021-03-04 09:11:23 +05:30
} else if prConfig . AllowManualMerge {
2022-05-08 18:02:45 +05:30
mergeStyle = repo_model . MergeStyleManuallyMerged
2018-01-06 00:26:50 +05:30
}
}
2022-05-08 18:02:45 +05:30
ctx . Data [ "MergeStyle" ] = mergeStyle
2022-12-29 18:10:20 +05:30
defaultMergeMessage , defaultMergeBody , err := pull_service . GetDefaultMergeMessage ( ctx , ctx . Repo . GitRepo , pull , mergeStyle )
2022-05-08 18:02:45 +05:30
if err != nil {
ctx . ServerError ( "GetDefaultMergeMessage" , err )
return
}
ctx . Data [ "DefaultMergeMessage" ] = defaultMergeMessage
2022-12-29 18:10:20 +05:30
ctx . Data [ "DefaultMergeBody" ] = defaultMergeBody
2022-05-08 18:02:45 +05:30
2022-12-29 18:10:20 +05:30
defaultSquashMergeMessage , defaultSquashMergeBody , err := pull_service . GetDefaultMergeMessage ( ctx , ctx . Repo . GitRepo , pull , repo_model . MergeStyleSquash )
2022-05-08 18:02:45 +05:30
if err != nil {
ctx . ServerError ( "GetDefaultSquashMergeMessage" , err )
return
}
ctx . Data [ "DefaultSquashMergeMessage" ] = defaultSquashMergeMessage
2022-12-29 18:10:20 +05:30
ctx . Data [ "DefaultSquashMergeBody" ] = defaultSquashMergeBody
2022-05-08 18:02:45 +05:30
2023-01-16 13:30:22 +05:30
pb , err := git_model . GetFirstMatchProtectedBranchRule ( ctx , pull . BaseRepoID , pull . BaseBranch )
if err != nil {
2018-12-11 16:58:37 +05:30
ctx . ServerError ( "LoadProtectedBranch" , err )
return
}
2021-10-17 22:28:36 +05:30
ctx . Data [ "ShowMergeInstructions" ] = true
2023-01-16 13:30:22 +05:30
if pb != nil {
pb . Repo = pull . BaseRepo
2021-11-09 18:44:12 +05:30
var showMergeInstructions bool
2022-03-22 12:33:22 +05:30
if ctx . Doer != nil {
2023-01-16 13:30:22 +05:30
showMergeInstructions = pb . CanUserPush ( ctx , ctx . Doer )
2021-11-09 18:44:12 +05:30
}
2023-01-16 13:30:22 +05:30
ctx . Data [ "ProtectedBranch" ] = pb
ctx . Data [ "IsBlockedByApprovals" ] = ! issues_model . HasEnoughApprovals ( ctx , pb , pull )
ctx . Data [ "IsBlockedByRejection" ] = issues_model . MergeBlockedByRejectedReview ( ctx , pb , pull )
ctx . Data [ "IsBlockedByOfficialReviewRequests" ] = issues_model . MergeBlockedByOfficialReviewRequests ( ctx , pb , pull )
ctx . Data [ "IsBlockedByOutdatedBranch" ] = issues_model . MergeBlockedByOutdatedBranch ( pb , pull )
ctx . Data [ "GrantedApprovals" ] = issues_model . GetGrantedApprovalsCount ( ctx , pb , pull )
ctx . Data [ "RequireSigned" ] = pb . RequireSignedCommits
2020-10-14 00:20:57 +05:30
ctx . Data [ "ChangedProtectedFiles" ] = pull . ChangedProtectedFiles
ctx . Data [ "IsBlockedByChangedProtectedFiles" ] = len ( pull . ChangedProtectedFiles ) != 0
ctx . Data [ "ChangedProtectedFilesNum" ] = len ( pull . ChangedProtectedFiles )
2021-11-09 18:44:12 +05:30
ctx . Data [ "ShowMergeInstructions" ] = showMergeInstructions
2020-01-15 14:02:57 +05:30
}
ctx . Data [ "WillSign" ] = false
2022-03-22 12:33:22 +05:30
if ctx . Doer != nil {
sign , key , _ , err := asymkey_service . SignMerge ( ctx , pull , ctx . Doer , pull . BaseRepo . RepoPath ( ) , pull . BaseBranch , pull . GetGitRefName ( ) )
2020-01-15 14:02:57 +05:30
ctx . Data [ "WillSign" ] = sign
ctx . Data [ "SigningKey" ] = key
if err != nil {
2021-12-10 13:44:24 +05:30
if asymkey_service . IsErrWontSign ( err ) {
ctx . Data [ "WontSignReason" ] = err . ( * asymkey_service . ErrWontSign ) . Reason
2020-01-15 14:02:57 +05:30
} else {
ctx . Data [ "WontSignReason" ] = "error"
log . Error ( "Error whilst checking if could sign pr %d in repo %s. Error: %v" , pull . ID , pull . BaseRepo . FullName ( ) , err )
}
}
2020-08-24 03:29:41 +05:30
} else {
ctx . Data [ "WontSignReason" ] = "not_signed_in"
2018-12-11 16:58:37 +05:30
}
2022-01-04 01:15:58 +05:30
isPullBranchDeletable := canDelete &&
2020-01-07 22:36:14 +05:30
pull . HeadRepo != nil &&
2021-12-01 01:36:32 +05:30
git . IsBranchExist ( ctx , pull . HeadRepo . RepoPath ( ) , pull . HeadBranch ) &&
2020-01-07 22:36:14 +05:30
( ! pull . HasMerged || ctx . Data [ "HeadBranchCommitID" ] == ctx . Data [ "PullHeadCommitID" ] )
2021-03-04 09:11:23 +05:30
2022-01-04 01:15:58 +05:30
if isPullBranchDeletable && pull . HasMerged {
2022-06-13 15:07:59 +05:30
exist , err := issues_model . HasUnmergedPullRequestsByHeadInfo ( ctx , pull . HeadRepoID , pull . HeadBranch )
2022-01-04 01:15:58 +05:30
if err != nil {
ctx . ServerError ( "HasUnmergedPullRequestsByHeadInfo" , err )
return
}
isPullBranchDeletable = ! exist
}
ctx . Data [ "IsPullBranchDeletable" ] = isPullBranchDeletable
2021-03-04 09:11:23 +05:30
stillCanManualMerge := func ( ) bool {
if pull . HasMerged || issue . IsClosed || ! ctx . IsSigned {
return false
}
2023-10-11 09:54:07 +05:30
if pull . CanAutoMerge ( ) || pull . IsWorkInProgress ( ctx ) || pull . IsChecking ( ) {
2021-03-04 09:11:23 +05:30
return false
}
2023-10-30 08:43:06 +05:30
if allowMerge && prConfig . AllowManualMerge {
2021-03-04 09:11:23 +05:30
return true
}
return false
}
ctx . Data [ "StillCanManualMerge" ] = stillCanManualMerge ( )
2022-05-07 22:35:52 +05:30
// Check if there is a pending pr merge
ctx . Data [ "HasPendingPullRequestMerge" ] , ctx . Data [ "PendingPullRequestMerge" ] , err = pull_model . GetScheduledMergeByPullID ( ctx , pull . ID )
if err != nil {
ctx . ServerError ( "GetScheduledMergeByPullID" , err )
return
}
2016-12-25 20:57:25 +05:30
}
2018-07-18 02:53:58 +05:30
// Get Dependencies
2023-03-28 22:53:25 +05:30
blockedBy , err := issue . BlockedByDependencies ( ctx , db . ListOptions { } )
2019-06-13 01:11:28 +05:30
if err != nil {
ctx . ServerError ( "BlockedByDependencies" , err )
return
}
2023-03-28 22:53:25 +05:30
ctx . Data [ "BlockedByDependencies" ] , ctx . Data [ "BlockedByDependenciesNotPermitted" ] = checkBlockedByIssues ( ctx , blockedBy )
if ctx . Written ( ) {
return
}
blocking , err := issue . BlockingDependencies ( ctx )
2019-06-13 01:11:28 +05:30
if err != nil {
ctx . ServerError ( "BlockingDependencies" , err )
return
}
2018-07-18 02:53:58 +05:30
2024-01-12 22:19:02 +05:30
ctx . Data [ "BlockingDependencies" ] , ctx . Data [ "BlockingDependenciesNotPermitted" ] = checkBlockedByIssues ( ctx , blocking )
2023-03-28 22:53:25 +05:30
if ctx . Written ( ) {
return
}
2023-05-25 18:47:19 +05:30
var pinAllowed bool
if ! issue . IsPinned ( ) {
pinAllowed , err = issues_model . IsNewPinAllowed ( ctx , issue . RepoID , issue . IsPull )
if err != nil {
ctx . ServerError ( "IsNewPinAllowed" , err )
return
}
} else {
pinAllowed = true
}
2016-01-19 18:34:24 +05:30
ctx . Data [ "Participants" ] = participants
2016-02-02 07:25:12 +05:30
ctx . Data [ "NumParticipants" ] = len ( participants )
2014-07-26 11:58:04 +05:30
ctx . Data [ "Issue" ] = issue
2021-12-18 02:59:09 +05:30
ctx . Data [ "Reference" ] = issue . Ref
2021-11-16 23:48:25 +05:30
ctx . Data [ "SignInLink" ] = setting . AppSubURL + "/user/login?redirect_to=" + url . QueryEscape ( ctx . Data [ "Link" ] . ( string ) )
2022-03-22 12:33:22 +05:30
ctx . Data [ "IsIssuePoster" ] = ctx . IsSigned && issue . IsPoster ( ctx . Doer . ID )
2020-04-04 11:09:48 +05:30
ctx . Data [ "HasIssuesOrPullsWritePermission" ] = ctx . Repo . CanWriteIssuesOrPulls ( issue . IsPull )
2021-11-10 01:27:58 +05:30
ctx . Data [ "HasProjectsWritePermission" ] = ctx . Repo . CanWrite ( unit . TypeProjects )
2022-03-22 12:33:22 +05:30
ctx . Data [ "IsRepoAdmin" ] = ctx . IsSigned && ( ctx . Repo . IsAdmin ( ) || ctx . Doer . IsAdmin )
2019-02-19 02:25:04 +05:30
ctx . Data [ "LockReasons" ] = setting . Repository . Issue . LockReasons
2023-05-26 06:34:48 +05:30
ctx . Data [ "RefEndName" ] = git . RefName ( issue . Ref ) . ShortName ( )
2023-05-25 18:47:19 +05:30
ctx . Data [ "NewPinAllowed" ] = pinAllowed
ctx . Data [ "PinEnabled" ] = setting . Repository . Issue . MaxPinned != 0
2022-01-21 23:29:26 +05:30
var hiddenCommentTypes * big . Int
if ctx . IsSigned {
2023-09-15 11:43:19 +05:30
val , err := user_model . GetUserSetting ( ctx , ctx . Doer . ID , user_model . SettingsKeyHiddenCommentTypes )
2022-01-21 23:29:26 +05:30
if err != nil {
ctx . ServerError ( "GetUserSetting" , err )
return
}
hiddenCommentTypes , _ = new ( big . Int ) . SetString ( val , 10 ) // we can safely ignore the failed conversion here
}
2022-06-13 15:07:59 +05:30
ctx . Data [ "ShouldShowCommentType" ] = func ( commentType issues_model . CommentType ) bool {
2022-01-21 23:29:26 +05:30
return hiddenCommentTypes == nil || hiddenCommentTypes . Bit ( int ( commentType ) ) == 0
}
2023-07-21 16:50:04 +05:30
// For sidebar
PrepareBranchList ( ctx )
if ctx . Written ( ) {
return
}
tags , err := repo_model . GetTagNamesByRepoID ( ctx , ctx . Repo . Repository . ID )
if err != nil {
ctx . ServerError ( "GetTagNamesByRepoID" , err )
return
}
ctx . Data [ "Tags" ] = tags
2022-01-21 23:29:26 +05:30
2021-04-05 21:00:52 +05:30
ctx . HTML ( http . StatusOK , tplIssueView )
2014-07-26 11:58:04 +05:30
}
2023-07-07 11:01:56 +05:30
// checkBlockedByIssues return canRead and notPermitted
2023-03-28 22:53:25 +05:30
func checkBlockedByIssues ( ctx * context . Context , blockers [ ] * issues_model . DependencyInfo ) ( canRead , notPermitted [ ] * issues_model . DependencyInfo ) {
2024-01-12 22:19:02 +05:30
repoPerms := make ( map [ int64 ] access_model . Permission )
repoPerms [ ctx . Repo . Repository . ID ] = ctx . Repo . Permission
for _ , blocker := range blockers {
2023-03-28 22:53:25 +05:30
// Get the permissions for this repository
2024-01-12 22:19:02 +05:30
// If the repo ID exists in the map, return the exist permissions
// else get the permission and add it to the map
var perm access_model . Permission
existPerm , ok := repoPerms [ blocker . RepoID ]
if ok {
perm = existPerm
} else {
var err error
perm , err = access_model . GetUserRepoPermission ( ctx , & blocker . Repository , ctx . Doer )
if err != nil {
ctx . ServerError ( "GetUserRepoPermission" , err )
return nil , nil
2023-03-28 22:53:25 +05:30
}
2024-01-12 22:19:02 +05:30
repoPerms [ blocker . RepoID ] = perm
2023-03-28 22:53:25 +05:30
}
2024-01-12 22:19:02 +05:30
if perm . CanReadIssuesOrPulls ( blocker . Issue . IsPull ) {
canRead = append ( canRead , blocker )
} else {
notPermitted = append ( notPermitted , blocker )
2023-03-28 22:53:25 +05:30
}
}
2024-01-12 22:19:02 +05:30
sortDependencyInfo ( canRead )
2023-03-28 22:53:25 +05:30
sortDependencyInfo ( notPermitted )
2024-01-12 22:19:02 +05:30
return canRead , notPermitted
2023-03-28 22:53:25 +05:30
}
// sortDependencyInfo orders blockers by repository ID first, then by issue
// creation time within the same repository.
func sortDependencyInfo(blockers []*issues_model.DependencyInfo) {
	sort.Slice(blockers, func(i, j int) bool {
		left, right := blockers[i], blockers[j]
		if left.RepoID != right.RepoID {
			return left.RepoID < right.RepoID
		}
		return left.Issue.CreatedUnix < right.Issue.CreatedUnix
	})
}
2017-09-12 12:18:13 +05:30
// GetActionIssue will return the issue which is used in the context.
2022-06-13 15:07:59 +05:30
func GetActionIssue ( ctx * context . Context ) * issues_model . Issue {
2023-07-22 19:44:27 +05:30
issue , err := issues_model . GetIssueByIndex ( ctx , ctx . Repo . Repository . ID , ctx . ParamsInt64 ( ":index" ) )
2014-07-26 11:58:04 +05:30
if err != nil {
2022-06-13 15:07:59 +05:30
ctx . NotFoundOrServerError ( "GetIssueByIndex" , issues_model . IsErrIssueNotExist , err )
2017-10-16 13:25:43 +05:30
return nil
}
2018-12-13 21:25:43 +05:30
issue . Repo = ctx . Repo . Repository
2017-12-04 04:44:26 +05:30
checkIssueRights ( ctx , issue )
if ctx . Written ( ) {
2017-10-16 13:25:43 +05:30
return nil
}
2022-06-13 15:07:59 +05:30
if err = issue . LoadAttributes ( ctx ) ; err != nil {
2023-07-06 00:22:12 +05:30
ctx . ServerError ( "LoadAttributes" , err )
2015-08-19 20:44:57 +05:30
return nil
}
return issue
}
2022-06-13 15:07:59 +05:30
func checkIssueRights ( ctx * context . Context , issue * issues_model . Issue ) {
2021-11-10 01:27:58 +05:30
if issue . IsPull && ! ctx . Repo . CanRead ( unit . TypePullRequests ) ||
! issue . IsPull && ! ctx . Repo . CanRead ( unit . TypeIssues ) {
2018-01-11 03:04:17 +05:30
ctx . NotFound ( "IssueOrPullRequestUnitNotAllowed" , nil )
2017-12-04 04:44:26 +05:30
}
}
2023-06-24 21:01:28 +05:30
func getActionIssues ( ctx * context . Context ) issues_model . IssueList {
2021-08-11 06:01:13 +05:30
commaSeparatedIssueIDs := ctx . FormString ( "issue_ids" )
2017-03-15 06:40:35 +05:30
if len ( commaSeparatedIssueIDs ) == 0 {
return nil
}
issueIDs := make ( [ ] int64 , 0 , 10 )
for _ , stringIssueID := range strings . Split ( commaSeparatedIssueIDs , "," ) {
issueID , err := strconv . ParseInt ( stringIssueID , 10 , 64 )
if err != nil {
2018-01-11 03:04:17 +05:30
ctx . ServerError ( "ParseInt" , err )
2017-03-15 06:40:35 +05:30
return nil
}
issueIDs = append ( issueIDs , issueID )
}
2022-06-13 15:07:59 +05:30
issues , err := issues_model . GetIssuesByIDs ( ctx , issueIDs )
2017-03-15 06:40:35 +05:30
if err != nil {
2018-01-11 03:04:17 +05:30
ctx . ServerError ( "GetIssuesByIDs" , err )
2017-03-15 06:40:35 +05:30
return nil
}
2017-10-16 13:25:43 +05:30
// Check access rights for all issues
2021-11-10 01:27:58 +05:30
issueUnitEnabled := ctx . Repo . CanRead ( unit . TypeIssues )
prUnitEnabled := ctx . Repo . CanRead ( unit . TypePullRequests )
2017-10-16 13:25:43 +05:30
for _ , issue := range issues {
2022-06-30 21:25:08 +05:30
if issue . RepoID != ctx . Repo . Repository . ID {
ctx . NotFound ( "some issue's RepoID is incorrect" , errors . New ( "some issue's RepoID is incorrect" ) )
return nil
}
2017-10-16 13:25:43 +05:30
if issue . IsPull && ! prUnitEnabled || ! issue . IsPull && ! issueUnitEnabled {
2018-01-11 03:04:17 +05:30
ctx . NotFound ( "IssueOrPullRequestUnitNotAllowed" , nil )
2017-10-16 13:25:43 +05:30
return nil
}
2022-06-13 15:07:59 +05:30
if err = issue . LoadAttributes ( ctx ) ; err != nil {
2018-01-11 03:04:17 +05:30
ctx . ServerError ( "LoadAttributes" , err )
2017-10-16 13:25:43 +05:30
return nil
}
}
2017-03-15 06:40:35 +05:30
return issues
}
2022-04-08 00:29:56 +05:30
// GetIssueInfo get an issue of a repository
func GetIssueInfo ( ctx * context . Context ) {
2023-07-22 19:44:27 +05:30
issue , err := issues_model . GetIssueWithAttrsByIndex ( ctx , ctx . Repo . Repository . ID , ctx . ParamsInt64 ( ":index" ) )
2022-04-08 00:29:56 +05:30
if err != nil {
2022-06-13 15:07:59 +05:30
if issues_model . IsErrIssueNotExist ( err ) {
2022-04-08 00:29:56 +05:30
ctx . Error ( http . StatusNotFound )
} else {
ctx . Error ( http . StatusInternalServerError , "GetIssueByIndex" , err . Error ( ) )
}
return
}
2022-06-05 01:40:54 +05:30
if issue . IsPull {
// Need to check if Pulls are enabled and we can read Pulls
if ! ctx . Repo . Repository . CanEnablePulls ( ) || ! ctx . Repo . CanRead ( unit . TypePullRequests ) {
ctx . Error ( http . StatusNotFound )
return
}
} else {
// Need to check if Issues are enabled and we can read Issues
if ! ctx . Repo . CanRead ( unit . TypeIssues ) {
ctx . Error ( http . StatusNotFound )
return
}
}
2024-04-09 02:56:41 +05:30
ctx . JSON ( http . StatusOK , convert . ToIssue ( ctx , ctx . Doer , issue ) )
2022-04-08 00:29:56 +05:30
}
2016-11-24 12:34:31 +05:30
// UpdateIssueTitle change issue's title
2016-03-11 22:26:52 +05:30
func UpdateIssueTitle ( ctx * context . Context ) {
2017-09-12 12:18:13 +05:30
issue := GetActionIssue ( ctx )
2015-08-19 20:44:57 +05:30
if ctx . Written ( ) {
2014-07-26 11:58:04 +05:30
return
}
2022-03-22 12:33:22 +05:30
if ! ctx . IsSigned || ( ! issue . IsPoster ( ctx . Doer . ID ) && ! ctx . Repo . CanWriteIssuesOrPulls ( issue . IsPull ) ) {
2021-04-05 21:00:52 +05:30
ctx . Error ( http . StatusForbidden )
2014-07-26 11:58:04 +05:30
return
}
2021-07-29 07:12:15 +05:30
title := ctx . FormTrim ( "title" )
2016-08-14 16:02:24 +05:30
if len ( title ) == 0 {
2021-04-05 21:00:52 +05:30
ctx . Error ( http . StatusNoContent )
2015-08-19 20:44:57 +05:30
return
2014-07-26 11:58:04 +05:30
}
2015-08-19 20:44:57 +05:30
2023-04-14 23:48:28 +05:30
if err := issue_service . ChangeTitle ( ctx , issue , ctx . Doer , title ) ; err != nil {
2018-01-11 03:04:17 +05:30
ctx . ServerError ( "ChangeTitle" , err )
2014-07-26 11:58:04 +05:30
return
}
2023-07-05 00:06:08 +05:30
ctx . JSON ( http . StatusOK , map [ string ] any {
2016-08-14 16:02:24 +05:30
"title" : issue . Title ,
2014-07-26 11:58:04 +05:30
} )
}
2020-09-08 21:59:51 +05:30
// UpdateIssueRef change issue's ref (branch)
func UpdateIssueRef ( ctx * context . Context ) {
issue := GetActionIssue ( ctx )
if ctx . Written ( ) {
return
}
2022-03-22 12:33:22 +05:30
if ! ctx . IsSigned || ( ! issue . IsPoster ( ctx . Doer . ID ) && ! ctx . Repo . CanWriteIssuesOrPulls ( issue . IsPull ) ) || issue . IsPull {
2021-04-05 21:00:52 +05:30
ctx . Error ( http . StatusForbidden )
2020-09-08 21:59:51 +05:30
return
}
2021-07-29 07:12:15 +05:30
ref := ctx . FormTrim ( "ref" )
2020-09-08 21:59:51 +05:30
2023-04-14 23:48:28 +05:30
if err := issue_service . ChangeIssueRef ( ctx , issue , ctx . Doer , ref ) ; err != nil {
2020-09-08 21:59:51 +05:30
ctx . ServerError ( "ChangeRef" , err )
return
}
2023-07-05 00:06:08 +05:30
ctx . JSON ( http . StatusOK , map [ string ] any {
2020-09-08 21:59:51 +05:30
"ref" : ref ,
} )
}
2016-11-24 12:34:31 +05:30
// UpdateIssueContent change issue's content
2016-03-11 22:26:52 +05:30
func UpdateIssueContent ( ctx * context . Context ) {
2017-09-12 12:18:13 +05:30
issue := GetActionIssue ( ctx )
2015-08-20 02:01:28 +05:30
if ctx . Written ( ) {
return
}
2022-03-22 12:33:22 +05:30
if ! ctx . IsSigned || ( ctx . Doer . ID != issue . PosterID && ! ctx . Repo . CanWriteIssuesOrPulls ( issue . IsPull ) ) {
2021-04-05 21:00:52 +05:30
ctx . Error ( http . StatusForbidden )
2015-08-20 02:01:28 +05:30
return
}
2024-05-27 21:04:18 +05:30
if err := issue_service . ChangeContent ( ctx , issue , ctx . Doer , ctx . Req . FormValue ( "content" ) , ctx . FormInt ( "content_version" ) ) ; err != nil {
if errors . Is ( err , issues_model . ErrIssueAlreadyChanged ) {
if issue . IsPull {
ctx . JSONError ( ctx . Tr ( "repo.pulls.edit.already_changed" ) )
} else {
ctx . JSONError ( ctx . Tr ( "repo.issues.edit.already_changed" ) )
}
} else {
ctx . ServerError ( "ChangeContent" , err )
}
2015-08-20 02:01:28 +05:30
return
}
2021-08-21 18:34:47 +05:30
// when update the request doesn't intend to update attachments (eg: change checkbox state), ignore attachment updates
if ! ctx . FormBool ( "ignore_attachments" ) {
2022-05-20 19:38:52 +05:30
if err := updateAttachments ( ctx , issue , ctx . FormStrings ( "files[]" ) ) ; err != nil {
2021-08-21 18:34:47 +05:30
ctx . ServerError ( "UpdateAttachments" , err )
return
}
2021-04-20 03:55:08 +05:30
}
content , err := markdown . RenderString ( & markup . RenderContext {
2024-01-15 14:19:24 +05:30
Links : markup . Links {
Base : ctx . FormString ( "context" ) , // FIXME: <- IS THIS SAFE ?
} ,
Metas : ctx . Repo . Repository . ComposeMetas ( ctx ) ,
GitRepo : ctx . Repo . GitRepo ,
Ctx : ctx ,
2021-04-20 03:55:08 +05:30
} , issue . Content )
if err != nil {
ctx . ServerError ( "RenderString" , err )
return
2019-10-15 17:49:32 +05:30
}
2023-07-05 00:06:08 +05:30
ctx . JSON ( http . StatusOK , map [ string ] any {
2024-05-27 21:04:18 +05:30
"content" : content ,
"contentVersion" : issue . ContentVersion ,
"attachments" : attachmentsHTML ( ctx , issue . Attachments , issue . Content ) ,
2015-08-20 02:01:28 +05:30
} )
}
2022-04-08 00:29:56 +05:30
// UpdateIssueDeadline updates an issue deadline
func UpdateIssueDeadline ( ctx * context . Context ) {
form := web . GetForm ( ctx ) . ( * api . EditDeadlineOption )
2023-07-22 19:44:27 +05:30
issue , err := issues_model . GetIssueByIndex ( ctx , ctx . Repo . Repository . ID , ctx . ParamsInt64 ( ":index" ) )
2022-04-08 00:29:56 +05:30
if err != nil {
2022-06-13 15:07:59 +05:30
if issues_model . IsErrIssueNotExist ( err ) {
2022-04-08 00:29:56 +05:30
ctx . NotFound ( "GetIssueByIndex" , err )
} else {
ctx . Error ( http . StatusInternalServerError , "GetIssueByIndex" , err . Error ( ) )
}
return
}
if ! ctx . Repo . CanWriteIssuesOrPulls ( issue . IsPull ) {
ctx . Error ( http . StatusForbidden , "" , "Not repo writer" )
return
}
var deadlineUnix timeutil . TimeStamp
var deadline time . Time
if form . Deadline != nil && ! form . Deadline . IsZero ( ) {
deadline = time . Date ( form . Deadline . Year ( ) , form . Deadline . Month ( ) , form . Deadline . Day ( ) ,
23 , 59 , 59 , 0 , time . Local )
deadlineUnix = timeutil . TimeStamp ( deadline . Unix ( ) )
}
2023-09-29 17:42:54 +05:30
if err := issues_model . UpdateIssueDeadline ( ctx , issue , deadlineUnix , ctx . Doer ) ; err != nil {
2022-04-08 00:29:56 +05:30
ctx . Error ( http . StatusInternalServerError , "UpdateIssueDeadline" , err . Error ( ) )
return
}
ctx . JSON ( http . StatusCreated , api . IssueDeadline { Deadline : & deadline } )
}
2016-11-24 12:34:31 +05:30
// UpdateIssueMilestone change issue's milestone
2016-03-11 22:26:52 +05:30
func UpdateIssueMilestone ( ctx * context . Context ) {
2017-03-15 06:40:35 +05:30
issues := getActionIssues ( ctx )
2015-08-14 22:12:43 +05:30
if ctx . Written ( ) {
2014-07-26 11:58:04 +05:30
return
}
2021-07-29 07:12:15 +05:30
milestoneID := ctx . FormInt64 ( "id" )
2017-03-15 06:40:35 +05:30
for _ , issue := range issues {
oldMilestoneID := issue . MilestoneID
if oldMilestoneID == milestoneID {
continue
}
issue . MilestoneID = milestoneID
2023-10-11 09:54:07 +05:30
if err := issue_service . ChangeMilestoneAssign ( ctx , issue , ctx . Doer , oldMilestoneID ) ; err != nil {
2018-01-11 03:04:17 +05:30
ctx . ServerError ( "ChangeMilestoneAssign" , err )
2017-03-15 06:40:35 +05:30
return
}
2014-07-26 11:58:04 +05:30
}
2023-07-26 11:34:01 +05:30
ctx . JSONOK ( )
2014-07-26 11:58:04 +05:30
}
2019-10-25 20:16:37 +05:30
// UpdateIssueAssignee change issue's or pull's assignee
2016-03-11 22:26:52 +05:30
func UpdateIssueAssignee ( ctx * context . Context ) {
2017-03-15 06:40:35 +05:30
issues := getActionIssues ( ctx )
2015-08-14 22:12:43 +05:30
if ctx . Written ( ) {
2014-07-26 11:58:04 +05:30
return
}
2021-07-29 07:12:15 +05:30
assigneeID := ctx . FormInt64 ( "id" )
2021-08-11 06:01:13 +05:30
action := ctx . FormString ( "action" )
2018-05-09 21:59:04 +05:30
2017-03-15 06:40:35 +05:30
for _ , issue := range issues {
2018-05-09 21:59:04 +05:30
switch action {
case "clear" :
2023-04-14 23:48:28 +05:30
if err := issue_service . DeleteNotPassedAssignee ( ctx , issue , ctx . Doer , [ ] * user_model . User { } ) ; err != nil {
2018-05-09 21:59:04 +05:30
ctx . ServerError ( "ClearAssignees" , err )
return
}
default :
2022-12-03 08:18:26 +05:30
assignee , err := user_model . GetUserByID ( ctx , assigneeID )
2019-10-25 20:16:37 +05:30
if err != nil {
ctx . ServerError ( "GetUserByID" , err )
return
}
2022-05-11 15:39:36 +05:30
valid , err := access_model . CanBeAssigned ( ctx , assignee , issue . Repo , issue . IsPull )
2019-10-25 20:16:37 +05:30
if err != nil {
ctx . ServerError ( "canBeAssigned" , err )
2018-05-09 21:59:04 +05:30
return
}
2019-10-25 20:16:37 +05:30
if ! valid {
2022-06-13 15:07:59 +05:30
ctx . ServerError ( "canBeAssigned" , repo_model . ErrUserDoesNotHaveAccessToRepo { UserID : assigneeID , RepoName : issue . Repo . Name } )
2019-10-25 20:16:37 +05:30
return
}
2023-08-10 08:09:21 +05:30
_ , _ , err = issue_service . ToggleAssigneeWithNotify ( ctx , issue , ctx . Doer , assigneeID )
2019-10-25 20:16:37 +05:30
if err != nil {
ctx . ServerError ( "ToggleAssignee" , err )
return
}
2017-03-15 06:40:35 +05:30
}
2014-07-26 11:58:04 +05:30
}
2023-07-26 11:34:01 +05:30
ctx . JSONOK ( )
2017-03-15 06:40:35 +05:30
}
2014-07-26 11:58:04 +05:30
2020-10-13 01:25:13 +05:30
// UpdatePullReviewRequest add or remove review request
func UpdatePullReviewRequest ( ctx * context . Context ) {
2020-04-06 22:03:34 +05:30
issues := getActionIssues ( ctx )
if ctx . Written ( ) {
return
}
2021-07-29 07:12:15 +05:30
reviewID := ctx . FormInt64 ( "id" )
2021-08-11 06:01:13 +05:30
action := ctx . FormString ( "action" )
2020-04-06 22:03:34 +05:30
2020-09-02 22:25:13 +05:30
// TODO: Not support 'clear' now
if action != "attach" && action != "detach" {
2022-03-23 10:24:07 +05:30
ctx . Status ( http . StatusForbidden )
2020-04-06 22:03:34 +05:30
return
}
for _ , issue := range issues {
2022-04-08 14:41:15 +05:30
if err := issue . LoadRepo ( ctx ) ; err != nil {
2020-10-13 01:25:13 +05:30
ctx . ServerError ( "issue.LoadRepo" , err )
return
}
if ! issue . IsPull {
log . Warn (
"UpdatePullReviewRequest: refusing to add review request for non-PR issue %-v#%d" ,
issue . Repo , issue . Index ,
)
2022-03-23 10:24:07 +05:30
ctx . Status ( http . StatusForbidden )
2020-10-13 01:25:13 +05:30
return
}
if reviewID < 0 {
// negative reviewIDs represent team requests
2023-02-18 17:41:03 +05:30
if err := issue . Repo . LoadOwner ( ctx ) ; err != nil {
ctx . ServerError ( "issue.Repo.LoadOwner" , err )
2020-10-13 01:25:13 +05:30
return
}
if ! issue . Repo . Owner . IsOrganization ( ) {
log . Warn (
"UpdatePullReviewRequest: refusing to add team review request for %s#%d owned by non organization UID[%d]" ,
issue . Repo . FullName ( ) , issue . Index , issue . Repo . ID ,
)
2022-03-23 10:24:07 +05:30
ctx . Status ( http . StatusForbidden )
2020-10-13 01:25:13 +05:30
return
}
2020-04-06 22:03:34 +05:30
2022-05-20 19:38:52 +05:30
team , err := organization . GetTeamByID ( ctx , - reviewID )
2020-04-06 22:03:34 +05:30
if err != nil {
2022-03-29 11:59:02 +05:30
ctx . ServerError ( "GetTeamByID" , err )
2020-04-06 22:03:34 +05:30
return
}
2020-10-13 01:25:13 +05:30
if team . OrgID != issue . Repo . OwnerID {
log . Warn (
"UpdatePullReviewRequest: refusing to add team review request for UID[%d] team %s to %s#%d owned by UID[%d]" ,
team . OrgID , team . Name , issue . Repo . FullName ( ) , issue . Index , issue . Repo . ID )
2022-03-23 10:24:07 +05:30
ctx . Status ( http . StatusForbidden )
2020-10-13 01:25:13 +05:30
return
}
2022-04-28 17:18:48 +05:30
err = issue_service . IsValidTeamReviewRequest ( ctx , team , ctx . Doer , action == "attach" , issue )
2020-04-06 22:03:34 +05:30
if err != nil {
2022-06-13 15:07:59 +05:30
if issues_model . IsErrNotValidReviewRequest ( err ) {
2020-10-13 01:25:13 +05:30
log . Warn (
"UpdatePullReviewRequest: refusing to add invalid team review request for UID[%d] team %s to %s#%d owned by UID[%d]: Error: %v" ,
team . OrgID , team . Name , issue . Repo . FullName ( ) , issue . Index , issue . Repo . ID ,
err ,
)
2022-03-23 10:24:07 +05:30
ctx . Status ( http . StatusForbidden )
2020-10-13 01:25:13 +05:30
return
}
2020-10-20 23:48:25 +05:30
ctx . ServerError ( "IsValidTeamReviewRequest" , err )
2020-04-06 22:03:34 +05:30
return
}
2023-04-14 23:48:28 +05:30
_ , err = issue_service . TeamReviewRequest ( ctx , issue , ctx . Doer , team , action == "attach" )
2020-04-06 22:03:34 +05:30
if err != nil {
2020-10-13 01:25:13 +05:30
ctx . ServerError ( "TeamReviewRequest" , err )
2020-04-06 22:03:34 +05:30
return
}
2020-10-13 01:25:13 +05:30
continue
}
2022-12-03 08:18:26 +05:30
reviewer , err := user_model . GetUserByID ( ctx , reviewID )
2020-10-13 01:25:13 +05:30
if err != nil {
2021-11-24 15:19:20 +05:30
if user_model . IsErrUserNotExist ( err ) {
2020-10-13 01:25:13 +05:30
log . Warn (
"UpdatePullReviewRequest: requested reviewer [%d] for %-v to %-v#%d is not exist: Error: %v" ,
reviewID , issue . Repo , issue . Index ,
err ,
)
2022-03-23 10:24:07 +05:30
ctx . Status ( http . StatusForbidden )
2020-10-13 01:25:13 +05:30
return
}
ctx . ServerError ( "GetUserByID" , err )
return
}
2022-04-28 17:18:48 +05:30
err = issue_service . IsValidReviewRequest ( ctx , reviewer , ctx . Doer , action == "attach" , issue , nil )
2020-10-13 01:25:13 +05:30
if err != nil {
2022-06-13 15:07:59 +05:30
if issues_model . IsErrNotValidReviewRequest ( err ) {
2020-10-13 01:25:13 +05:30
log . Warn (
"UpdatePullReviewRequest: refusing to add invalid review request for %-v to %-v#%d: Error: %v" ,
reviewer , issue . Repo , issue . Index ,
err ,
)
2022-03-23 10:24:07 +05:30
ctx . Status ( http . StatusForbidden )
2020-10-13 01:25:13 +05:30
return
}
ctx . ServerError ( "isValidReviewRequest" , err )
return
}
2023-04-14 23:48:28 +05:30
_ , err = issue_service . ReviewRequest ( ctx , issue , ctx . Doer , reviewer , action == "attach" )
2020-10-13 01:25:13 +05:30
if err != nil {
2024-03-28 20:49:24 +05:30
if issues_model . IsErrReviewRequestOnClosedPR ( err ) {
ctx . Status ( http . StatusForbidden )
return
}
2020-10-13 01:25:13 +05:30
ctx . ServerError ( "ReviewRequest" , err )
2020-09-02 22:25:13 +05:30
return
2020-04-06 22:03:34 +05:30
}
}
2023-07-26 11:34:01 +05:30
ctx . JSONOK ( )
2020-04-06 22:03:34 +05:30
}
2022-04-08 00:29:56 +05:30
// SearchIssues searches for issues across the repositories that the user has access to
func SearchIssues ( ctx * context . Context ) {
2023-05-21 07:20:53 +05:30
before , since , err := context . GetQueryBeforeSince ( ctx . Base )
2022-04-08 00:29:56 +05:30
if err != nil {
2023-12-22 21:50:50 +05:30
log . Error ( "GetQueryBeforeSince: %v" , err )
ctx . Error ( http . StatusUnprocessableEntity , "invalid before or since" )
2022-04-08 00:29:56 +05:30
return
}
2024-03-02 21:12:31 +05:30
var isClosed optional . Option [ bool ]
2022-04-08 00:29:56 +05:30
switch ctx . FormString ( "state" ) {
case "closed" :
2024-03-02 21:12:31 +05:30
isClosed = optional . Some ( true )
2022-04-08 00:29:56 +05:30
case "all" :
2024-03-02 21:12:31 +05:30
isClosed = optional . None [ bool ] ( )
2022-04-08 00:29:56 +05:30
default :
2024-03-02 21:12:31 +05:30
isClosed = optional . Some ( false )
2022-04-08 00:29:56 +05:30
}
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
var (
repoIDs [ ] int64
allPublic bool
)
{
// find repos user can access (for issue search)
opts := & repo_model . SearchRepoOptions {
Private : false ,
AllPublic : true ,
TopicOnly : false ,
2024-03-01 00:22:49 +05:30
Collaborate : optional . None [ bool ] ( ) ,
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most of the database indexer logic, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion about
where the entry point for searching issues is.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
// This needs to be a column that is not nil in fixtures or
// MySQL will return different results when sorting by null in some cases
OrderBy : db . SearchOrderByAlphabetically ,
Actor : ctx . Doer ,
}
if ctx . IsSigned {
opts . Private = true
opts . AllLimited = true
}
if ctx . FormString ( "owner" ) != "" {
owner , err := user_model . GetUserByName ( ctx , ctx . FormString ( "owner" ) )
if err != nil {
2023-12-22 21:50:50 +05:30
log . Error ( "GetUserByName: %v" , err )
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most of the database indexer logic, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion about
where the entry point for searching issues is.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
if user_model . IsErrUserNotExist ( err ) {
ctx . Error ( http . StatusBadRequest , "Owner not found" , err . Error ( ) )
} else {
2023-12-22 21:50:50 +05:30
ctx . Error ( http . StatusInternalServerError )
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most of the database indexer logic, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion about
where the entry point for searching issues is.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
}
return
2022-04-08 00:29:56 +05:30
}
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most of the database indexer logic, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion about
where the entry point for searching issues is.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
opts . OwnerID = owner . ID
opts . AllLimited = false
opts . AllPublic = false
2024-03-01 00:22:49 +05:30
opts . Collaborate = optional . Some ( false )
2022-04-08 00:29:56 +05:30
}
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most of the database indexer logic, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion about
where the entry point for searching issues is.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
if ctx . FormString ( "team" ) != "" {
if ctx . FormString ( "owner" ) == "" {
2023-12-22 21:50:50 +05:30
ctx . Error ( http . StatusBadRequest , "Owner organisation is required for filtering on team" )
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most of the database indexer logic, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion about
where the entry point for searching issues is.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
return
}
team , err := organization . GetTeam ( ctx , opts . OwnerID , ctx . FormString ( "team" ) )
if err != nil {
2023-12-22 21:50:50 +05:30
log . Error ( "GetTeam: %v" , err )
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most of the database indexer logic, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion about
where the entry point for searching issues is.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
if organization . IsErrTeamNotExist ( err ) {
2023-12-22 21:50:50 +05:30
ctx . Error ( http . StatusBadRequest )
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most of the database indexer logic, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion about
where the entry point for searching issues is.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
} else {
2023-12-22 21:50:50 +05:30
ctx . Error ( http . StatusInternalServerError )
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most of the database indexer logic, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion about
where the entry point for searching issues is.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
}
return
}
opts . TeamID = team . ID
}
if opts . AllPublic {
allPublic = true
opts . AllPublic = false // set it false to avoid returning too many repos, we could filter by indexer
2022-04-08 00:29:56 +05:30
}
2023-10-11 09:54:07 +05:30
repoIDs , _ , err = repo_model . SearchRepositoryIDs ( ctx , opts )
2022-04-08 00:29:56 +05:30
if err != nil {
2023-12-22 21:50:50 +05:30
log . Error ( "SearchRepositoryIDs: %v" , err )
ctx . Error ( http . StatusInternalServerError )
2022-04-08 00:29:56 +05:30
return
}
2023-08-17 23:12:17 +05:30
if len ( repoIDs ) == 0 {
// no repos found, don't let the indexer return all repos
repoIDs = [ ] int64 { 0 }
}
2022-04-08 00:29:56 +05:30
}
keyword := ctx . FormTrim ( "q" )
if strings . IndexByte ( keyword , 0 ) >= 0 {
keyword = ""
}
2024-03-02 21:12:31 +05:30
isPull := optional . None [ bool ] ( )
2022-04-08 00:29:56 +05:30
switch ctx . FormString ( "type" ) {
case "pulls" :
2024-03-02 21:12:31 +05:30
isPull = optional . Some ( true )
2022-04-08 00:29:56 +05:30
case "issues" :
2024-03-02 21:12:31 +05:30
isPull = optional . Some ( false )
2022-04-08 00:29:56 +05:30
}
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most of the database indexer logic, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion about
where the entry point for searching issues is.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
var includedAnyLabels [ ] int64
{
labels := ctx . FormTrim ( "labels" )
var includedLabelNames [ ] string
if len ( labels ) > 0 {
includedLabelNames = strings . Split ( labels , "," )
}
includedAnyLabels , err = issues_model . GetLabelIDsByNames ( ctx , includedLabelNames )
if err != nil {
2023-12-22 21:50:50 +05:30
log . Error ( "GetLabelIDsByNames: %v" , err )
ctx . Error ( http . StatusInternalServerError )
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most of the database indexer logic, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion about
where the entry point for searching issues is.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
return
}
2022-04-08 00:29:56 +05:30
}
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most of the database indexer logic, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion about
where the entry point for searching issues is.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
var includedMilestones [ ] int64
{
milestones := ctx . FormTrim ( "milestones" )
var includedMilestoneNames [ ] string
if len ( milestones ) > 0 {
includedMilestoneNames = strings . Split ( milestones , "," )
}
includedMilestones , err = issues_model . GetMilestoneIDsByNames ( ctx , includedMilestoneNames )
if err != nil {
2023-12-22 21:50:50 +05:30
log . Error ( "GetMilestoneIDsByNames: %v" , err )
ctx . Error ( http . StatusInternalServerError )
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most of the database indexer logic, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion about
where the entry point for searching issues is.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
return
}
2022-04-08 00:29:56 +05:30
}
2024-03-13 13:55:53 +05:30
projectID := optional . None [ int64 ] ( )
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most of the database indexer logic, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion about
where the entry point for searching issues is.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
if v := ctx . FormInt64 ( "project" ) ; v > 0 {
2024-03-13 13:55:53 +05:30
projectID = optional . Some ( v )
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most of the database indexer logic, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion about
where the entry point for searching issues is.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
}
2023-01-29 09:15:29 +05:30
2022-04-08 00:29:56 +05:30
// this api is also used in UI,
// so the default limit is set to fit UI needs
limit := ctx . FormInt ( "limit" )
if limit == 0 {
limit = setting . UI . IssuePagingNum
} else if limit > setting . API . MaxResponseItems {
limit = setting . API . MaxResponseItems
}
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
searchOpt := & issue_indexer . SearchOptions {
Paginator : & db . ListOptions {
Page : ctx . FormInt ( "page" ) ,
PageSize : limit ,
} ,
Keyword : keyword ,
RepoIDs : repoIDs ,
AllPublic : allPublic ,
IsPull : isPull ,
IsClosed : isClosed ,
IncludedAnyLabelIDs : includedAnyLabels ,
MilestoneIDs : includedMilestones ,
ProjectID : projectID ,
SortBy : issue_indexer . SortByCreatedDesc ,
}
2022-04-08 00:29:56 +05:30
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
if since != 0 {
2024-03-13 13:55:53 +05:30
searchOpt . UpdatedAfterUnix = optional . Some ( since )
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
}
if before != 0 {
2024-03-13 13:55:53 +05:30
searchOpt . UpdatedBeforeUnix = optional . Some ( before )
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
}
if ctx . IsSigned {
ctxUserID := ctx . Doer . ID
2022-04-08 00:29:56 +05:30
if ctx . FormBool ( "created" ) {
2024-03-13 13:55:53 +05:30
searchOpt . PosterID = optional . Some ( ctxUserID )
2022-04-08 00:29:56 +05:30
}
if ctx . FormBool ( "assigned" ) {
2024-03-13 13:55:53 +05:30
searchOpt . AssigneeID = optional . Some ( ctxUserID )
2022-04-08 00:29:56 +05:30
}
if ctx . FormBool ( "mentioned" ) {
2024-03-13 13:55:53 +05:30
searchOpt . MentionID = optional . Some ( ctxUserID )
2022-04-08 00:29:56 +05:30
}
if ctx . FormBool ( "review_requested" ) {
2024-03-13 13:55:53 +05:30
searchOpt . ReviewRequestedID = optional . Some ( ctxUserID )
2022-04-08 00:29:56 +05:30
}
2023-02-25 08:25:50 +05:30
if ctx . FormBool ( "reviewed" ) {
2024-03-13 13:55:53 +05:30
searchOpt . ReviewedID = optional . Some ( ctxUserID )
2023-02-25 08:25:50 +05:30
}
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
}
2022-04-08 00:29:56 +05:30
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
// FIXME: Sorting by priority repo is not supported when searching via the indexer;
// it's indeed a regression, but I think it is worth supporting filtering by indexer first.
_ = ctx . FormInt64 ( "priority_repo_id" )
2022-04-08 00:29:56 +05:30
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
ids , total , err := issue_indexer . SearchIssues ( ctx , searchOpt )
if err != nil {
2023-12-22 21:50:50 +05:30
log . Error ( "SearchIssues: %v" , err )
ctx . Error ( http . StatusInternalServerError )
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
return
}
issues , err := issues_model . GetIssuesByIDs ( ctx , ids , true )
if err != nil {
2023-12-22 21:50:50 +05:30
log . Error ( "GetIssuesByIDs: %v" , err )
ctx . Error ( http . StatusInternalServerError )
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
return
2022-04-08 00:29:56 +05:30
}
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
ctx . SetTotalCountHeader ( total )
2024-04-09 02:56:41 +05:30
ctx . JSON ( http . StatusOK , convert . ToIssueList ( ctx , ctx . Doer , issues ) )
2022-04-08 00:29:56 +05:30
}
func getUserIDForFilter ( ctx * context . Context , queryName string ) int64 {
userName := ctx . FormString ( queryName )
if len ( userName ) == 0 {
return 0
}
2022-05-20 19:38:52 +05:30
user , err := user_model . GetUserByName ( ctx , userName )
2022-04-08 00:29:56 +05:30
if user_model . IsErrUserNotExist ( err ) {
ctx . NotFound ( "" , err )
return 0
}
if err != nil {
ctx . Error ( http . StatusInternalServerError , err . Error ( ) )
return 0
}
return user . ID
}
// ListIssues list the issues of a repository
func ListIssues ( ctx * context . Context ) {
2023-05-21 07:20:53 +05:30
before , since , err := context . GetQueryBeforeSince ( ctx . Base )
2022-04-08 00:29:56 +05:30
if err != nil {
ctx . Error ( http . StatusUnprocessableEntity , err . Error ( ) )
return
}
2024-03-02 21:12:31 +05:30
var isClosed optional . Option [ bool ]
2022-04-08 00:29:56 +05:30
switch ctx . FormString ( "state" ) {
case "closed" :
2024-03-02 21:12:31 +05:30
isClosed = optional . Some ( true )
2022-04-08 00:29:56 +05:30
case "all" :
2024-03-02 21:12:31 +05:30
isClosed = optional . None [ bool ] ( )
2022-04-08 00:29:56 +05:30
default :
2024-03-02 21:12:31 +05:30
isClosed = optional . Some ( false )
2022-04-08 00:29:56 +05:30
}
keyword := ctx . FormTrim ( "q" )
if strings . IndexByte ( keyword , 0 ) >= 0 {
keyword = ""
}
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
var labelIDs [ ] int64
2024-05-09 19:19:37 +05:30
if split := strings . Split ( ctx . FormString ( "labels" ) , "," ) ; len ( split ) > 0 {
labelIDs , err = issues_model . GetLabelIDsInRepoByNames ( ctx , ctx . Repo . Repository . ID , split )
2022-04-08 00:29:56 +05:30
if err != nil {
ctx . Error ( http . StatusInternalServerError , err . Error ( ) )
return
}
}
var mileIDs [ ] int64
if part := strings . Split ( ctx . FormString ( "milestones" ) , "," ) ; len ( part ) > 0 {
for i := range part {
// uses names and falls back to IDs
// non-existent milestones are discarded
2023-09-16 20:09:12 +05:30
mile , err := issues_model . GetMilestoneByRepoIDANDName ( ctx , ctx . Repo . Repository . ID , part [ i ] )
2022-04-08 00:29:56 +05:30
if err == nil {
mileIDs = append ( mileIDs , mile . ID )
continue
}
2022-04-08 14:41:15 +05:30
if ! issues_model . IsErrMilestoneNotExist ( err ) {
2022-04-08 00:29:56 +05:30
ctx . Error ( http . StatusInternalServerError , err . Error ( ) )
return
}
id , err := strconv . ParseInt ( part [ i ] , 10 , 64 )
if err != nil {
continue
}
2022-04-08 14:41:15 +05:30
mile , err = issues_model . GetMilestoneByRepoID ( ctx , ctx . Repo . Repository . ID , id )
2022-04-08 00:29:56 +05:30
if err == nil {
mileIDs = append ( mileIDs , mile . ID )
continue
}
2022-04-08 14:41:15 +05:30
if issues_model . IsErrMilestoneNotExist ( err ) {
2022-04-08 00:29:56 +05:30
continue
}
ctx . Error ( http . StatusInternalServerError , err . Error ( ) )
}
}
2024-03-13 13:55:53 +05:30
projectID := optional . None [ int64 ] ( )
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
if v := ctx . FormInt64 ( "project" ) ; v > 0 {
2024-03-13 13:55:53 +05:30
projectID = optional . Some ( v )
2022-04-08 00:29:56 +05:30
}
2024-03-02 21:12:31 +05:30
isPull := optional . None [ bool ] ( )
2022-04-08 00:29:56 +05:30
switch ctx . FormString ( "type" ) {
case "pulls" :
2024-03-02 21:12:31 +05:30
isPull = optional . Some ( true )
2022-04-08 00:29:56 +05:30
case "issues" :
2024-03-02 21:12:31 +05:30
isPull = optional . Some ( false )
2022-04-08 00:29:56 +05:30
}
// FIXME: we should be more efficient here
createdByID := getUserIDForFilter ( ctx , "created_by" )
if ctx . Written ( ) {
return
}
assignedByID := getUserIDForFilter ( ctx , "assigned_by" )
if ctx . Written ( ) {
return
}
mentionedByID := getUserIDForFilter ( ctx , "mentioned_by" )
if ctx . Written ( ) {
return
}
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
searchOpt := & issue_indexer . SearchOptions {
Paginator : & db . ListOptions {
Page : ctx . FormInt ( "page" ) ,
PageSize : convert . ToCorrectPageSize ( ctx . FormInt ( "limit" ) ) ,
} ,
2024-05-27 14:29:54 +05:30
Keyword : keyword ,
RepoIDs : [ ] int64 { ctx . Repo . Repository . ID } ,
IsPull : isPull ,
IsClosed : isClosed ,
ProjectID : projectID ,
SortBy : issue_indexer . SortByCreatedDesc ,
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
}
if since != 0 {
2024-03-13 13:55:53 +05:30
searchOpt . UpdatedAfterUnix = optional . Some ( since )
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
}
if before != 0 {
2024-03-13 13:55:53 +05:30
searchOpt . UpdatedBeforeUnix = optional . Some ( before )
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
}
if len ( labelIDs ) == 1 && labelIDs [ 0 ] == 0 {
searchOpt . NoLabelOnly = true
} else {
for _ , labelID := range labelIDs {
if labelID > 0 {
searchOpt . IncludedLabelIDs = append ( searchOpt . IncludedLabelIDs , labelID )
} else {
searchOpt . ExcludedLabelIDs = append ( searchOpt . ExcludedLabelIDs , - labelID )
}
2022-04-08 00:29:56 +05:30
}
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
}
2022-04-08 00:29:56 +05:30
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
if len ( mileIDs ) == 1 && mileIDs [ 0 ] == db . NoConditionID {
searchOpt . MilestoneIDs = [ ] int64 { 0 }
} else {
searchOpt . MilestoneIDs = mileIDs
}
2022-04-08 00:29:56 +05:30
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
if createdByID > 0 {
2024-03-13 13:55:53 +05:30
searchOpt . PosterID = optional . Some ( createdByID )
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
}
if assignedByID > 0 {
2024-03-13 13:55:53 +05:30
searchOpt . AssigneeID = optional . Some ( assignedByID )
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
}
if mentionedByID > 0 {
2024-03-13 13:55:53 +05:30
searchOpt . MentionID = optional . Some ( mentionedByID )
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
}
ids , total , err := issue_indexer . SearchIssues ( ctx , searchOpt )
if err != nil {
ctx . Error ( http . StatusInternalServerError , "SearchIssues" , err . Error ( ) )
return
}
issues , err := issues_model . GetIssuesByIDs ( ctx , ids , true )
if err != nil {
ctx . Error ( http . StatusInternalServerError , "FindIssuesByIDs" , err . Error ( ) )
return
2022-04-08 00:29:56 +05:30
}
Refactor and enhance issue indexer to support both searching, filtering and paging (#26012)
Fix #24662.
Replace #24822 and #25708 (although it has been merged)
## Background
In the past, Gitea supported issue searching with a keyword and
conditions in a less efficient way. It worked by searching for issues
with the keyword and obtaining limited IDs (as it is heavy to get all)
on the indexer (bleve/elasticsearch/meilisearch), and then querying with
conditions on the database to find a subset of the found IDs. This is
why the results could be incomplete.
To solve this issue, we need to store all fields that could be used as
conditions in the indexer and support both keyword and additional
conditions when searching with the indexer.
## Major changes
- Redefine `IndexerData` to include all fields that could be used as
filter conditions.
- Refactor `Search(ctx context.Context, kw string, repoIDs []int64,
limit, start int, state string)` to `Search(ctx context.Context, options
*SearchOptions)`, so it supports more conditions now.
- Change the data type stored in `issueIndexerQueue`. Use
`IndexerMetadata` instead of `IndexerData` in case the data has been
updated while it is in the queue. This also reduces the storage size of
the queue.
- Enhance searching with Bleve/Elasticsearch/Meilisearch, make them
fully support `SearchOptions`. Also, update the data versions.
- Keep most logic of database indexer, but remove
`issues.SearchIssueIDsByKeyword` in `models` to avoid confusion where is
the entry point to search issues.
- Start a Meilisearch instance to test it in unit tests.
- Add unit tests with almost full coverage to test
Bleve/Elasticsearch/Meilisearch indexer.
---------
Co-authored-by: Lunny Xiao <xiaolunwen@gmail.com>
2023-07-31 11:58:53 +05:30
ctx . SetTotalCountHeader ( total )
2024-04-09 02:56:41 +05:30
ctx . JSON ( http . StatusOK , convert . ToIssueList ( ctx , ctx . Doer , issues ) )
2022-04-08 00:29:56 +05:30
}
2023-06-19 13:16:50 +05:30
// BatchDeleteIssues permanently deletes every issue selected in the request.
// It aborts with a server error on the first deletion failure; otherwise it
// replies with a JSON OK.
func BatchDeleteIssues(ctx *context.Context) {
	selected := getActionIssues(ctx)
	if ctx.Written() {
		return
	}
	for _, target := range selected {
		err := issue_service.DeleteIssue(ctx, ctx.Doer, ctx.Repo.GitRepo, target)
		if err != nil {
			ctx.ServerError("DeleteIssue", err)
			return
		}
	}
	ctx.JSONOK()
}
2017-03-15 06:40:35 +05:30
// UpdateIssueStatus change issue's status
func UpdateIssueStatus ( ctx * context . Context ) {
issues := getActionIssues ( ctx )
if ctx . Written ( ) {
2014-07-26 11:58:04 +05:30
return
}
2017-03-15 06:40:35 +05:30
var isClosed bool
2021-08-11 06:01:13 +05:30
switch action := ctx . FormString ( "action" ) ; action {
2017-03-15 06:40:35 +05:30
case "open" :
isClosed = false
case "close" :
isClosed = true
default :
log . Warn ( "Unrecognized action: %s" , action )
}
2023-06-24 21:01:28 +05:30
if _ , err := issues . LoadRepositories ( ctx ) ; err != nil {
2018-01-11 03:04:17 +05:30
ctx . ServerError ( "LoadRepositories" , err )
2017-03-15 06:40:35 +05:30
return
}
2023-07-16 03:40:49 +05:30
if err := issues . LoadPullRequests ( ctx ) ; err != nil {
ctx . ServerError ( "LoadPullRequests" , err )
return
}
2017-03-15 06:40:35 +05:30
for _ , issue := range issues {
2023-07-16 03:40:49 +05:30
if issue . IsPull && issue . PullRequest . HasMerged {
continue
}
2018-10-18 16:53:05 +05:30
if issue . IsClosed != isClosed {
2023-07-22 19:44:27 +05:30
if err := issue_service . ChangeStatus ( ctx , issue , ctx . Doer , "" , isClosed ) ; err != nil {
2022-06-13 15:07:59 +05:30
if issues_model . IsErrDependenciesLeft ( err ) {
2023-07-05 00:06:08 +05:30
ctx . JSON ( http . StatusPreconditionFailed , map [ string ] any {
2023-04-26 22:24:17 +05:30
"error" : ctx . Tr ( "repo.issues.dependency.issue_batch_close_blocked" , issue . Index ) ,
2018-10-18 16:53:05 +05:30
} )
return
}
ctx . ServerError ( "ChangeStatus" , err )
2018-07-18 02:53:58 +05:30
return
}
2017-03-15 06:40:35 +05:30
}
}
2023-06-19 13:16:50 +05:30
ctx . JSONOK ( )
2014-07-26 11:58:04 +05:30
}
2016-11-24 12:34:31 +05:30
// NewComment create a comment for issue
2021-01-26 21:06:53 +05:30
func NewComment ( ctx * context . Context ) {
2021-04-07 01:14:05 +05:30
form := web . GetForm ( ctx ) . ( * forms . CreateCommentForm )
2017-10-16 13:25:43 +05:30
issue := GetActionIssue ( ctx )
if ctx . Written ( ) {
2014-07-26 11:58:04 +05:30
return
}
2022-03-22 12:33:22 +05:30
if ! ctx . IsSigned || ( ctx . Doer . ID != issue . PosterID && ! ctx . Repo . CanReadIssuesOrPulls ( issue . IsPull ) ) {
2019-04-23 02:10:51 +05:30
if log . IsTrace ( ) {
if ctx . IsSigned {
issueType := "issues"
if issue . IsPull {
issueType = "pulls"
}
log . Trace ( "Permission Denied: User %-v not the Poster (ID: %d) and cannot read %s in Repo %-v.\n" +
"User in Repo has Permissions: %-+v" ,
2022-03-22 12:33:22 +05:30
ctx . Doer ,
Rewrite logger system (#24726)
## ⚠️ Breaking
The `log.<mode>.<logger>` style config has been dropped. If you used it,
please check the new config manual & app.example.ini to make your
instance output logs as expected.
Although many legacy options still work, it's encouraged to upgrade to
the new options.
The SMTP logger is deleted because SMTP is not suitable to collect logs.
If you have manually configured Gitea log options, please confirm the
logger system works as expected after upgrading.
## Description
Close #12082 and maybe more log-related issues, resolve some related
FIXMEs in old code (which seems unfixable before)
Just like rewriting queue #24505 : make code maintainable, clear legacy
bugs, and add the ability to support more writers (eg: JSON, structured
log)
There is a new document (with examples): `logging-config.en-us.md`
This PR is safer than the queue rewriting, because it's just for
logging, it won't break other logic.
## The old problems
The logging system is quite old and difficult to maintain:
* Unclear concepts: Logger, NamedLogger, MultiChannelledLogger,
SubLogger, EventLogger, WriterLogger etc
* Some code is difficult to know whether it is right:
`log.DelNamedLogger("console")` vs `log.DelNamedLogger(log.DEFAULT)` vs
`log.DelLogger("console")`
* The old system heavily depends on ini config system, it's difficult to
create new logger for different purpose, and it's very fragile.
* The "color" trick is difficult to use and read, many colors are
unnecessary, and in the future structured log could help
* It's difficult to add other log formats, eg: JSON format
* The log outputter doesn't have full control of its goroutine, it's
difficult to make outputters have advanced behaviors
* The logs could be lost in some cases: eg: no Fatal error when using
CLI.
* Config options are passed by JSON, which is quite fragile.
* INI package makes the KEY in `[log]` section visible in `[log.sub1]`
and `[log.sub1.subA]`, this behavior is quite fragile and would cause
more unclear problems, and there is no strong requirement to support
`log.<mode>.<logger>` syntax.
## The new design
See `logger.go` for documents.
## Screenshot
<details>
![image](https://github.com/go-gitea/gitea/assets/2114189/4462d713-ba39-41f5-bb08-de912e67e1ff)
![image](https://github.com/go-gitea/gitea/assets/2114189/b188035e-f691-428b-8b2d-ff7b2199b2f9)
![image](https://github.com/go-gitea/gitea/assets/2114189/132e9745-1c3b-4e00-9e0d-15eaea495dee)
</details>
## TODO
* [x] add some new tests
* [x] fix some tests
* [x] test some sub-commands (manually ....)
---------
Co-authored-by: Jason Song <i@wolfogre.com>
Co-authored-by: delvh <dev.lh@web.de>
Co-authored-by: Giteabot <teabot@gitea.io>
2023-05-22 04:05:11 +05:30
issue . PosterID ,
2019-04-23 02:10:51 +05:30
issueType ,
ctx . Repo . Repository ,
ctx . Repo . Permission )
} else {
log . Trace ( "Permission Denied: Not logged in" )
}
}
2021-04-05 21:00:52 +05:30
ctx . Error ( http . StatusForbidden )
2020-01-20 17:30:32 +05:30
return
2019-02-19 02:25:04 +05:30
}
2022-03-22 12:33:22 +05:30
if issue . IsLocked && ! ctx . Repo . CanWriteIssuesOrPulls ( issue . IsPull ) && ! ctx . Doer . IsAdmin {
2023-06-16 12:02:43 +05:30
ctx . JSONError ( ctx . Tr ( "repo.issues.comment_on_locked" ) )
2018-11-28 16:56:14 +05:30
return
}
2015-08-13 13:37:11 +05:30
var attachments [ ] string
2020-08-18 09:53:45 +05:30
if setting . Attachment . Enabled {
2016-08-11 18:18:08 +05:30
attachments = form . Files
2014-07-26 11:58:04 +05:30
}
2015-08-13 13:37:11 +05:30
if ctx . HasError ( ) {
2023-06-16 12:02:43 +05:30
ctx . JSONError ( ctx . GetErrMsg ( ) )
2015-08-13 13:37:11 +05:30
return
2014-07-26 11:58:04 +05:30
}
2022-06-13 15:07:59 +05:30
var comment * issues_model . Comment
2015-09-13 20:56:25 +05:30
defer func ( ) {
2015-11-01 04:29:07 +05:30
// Check if issue admin/poster changes the status of issue.
2022-03-22 12:33:22 +05:30
if ( ctx . Repo . CanWriteIssuesOrPulls ( issue . IsPull ) || ( ctx . IsSigned && issue . IsPoster ( ctx . Doer . ID ) ) ) &&
2015-09-13 20:56:25 +05:30
( form . Status == "reopen" || form . Status == "close" ) &&
2016-08-16 22:49:09 +05:30
! ( issue . IsPull && issue . PullRequest . HasMerged ) {
2015-10-25 12:40:22 +05:30
// Duplication and conflict check should apply to reopen pull request.
2022-06-13 15:07:59 +05:30
var pr * issues_model . PullRequest
2015-10-19 05:00:39 +05:30
2015-10-23 20:01:13 +05:30
if form . Status == "reopen" && issue . IsPull {
2015-10-19 05:00:39 +05:30
pull := issue . PullRequest
2019-06-13 01:11:28 +05:30
var err error
2022-11-19 13:42:33 +05:30
pr , err = issues_model . GetUnmergedPullRequest ( ctx , pull . HeadRepoID , pull . BaseRepoID , pull . HeadBranch , pull . BaseBranch , pull . Flow )
2015-10-19 05:00:39 +05:30
if err != nil {
2022-06-13 15:07:59 +05:30
if ! issues_model . IsErrPullRequestNotExist ( err ) {
2023-06-16 12:02:43 +05:30
ctx . JSONError ( ctx . Tr ( "repo.issues.dependency.pr_close_blocked" ) )
2015-10-19 05:00:39 +05:30
return
}
}
2015-10-25 12:40:22 +05:30
// Regenerate patch and test conflict.
if pr == nil {
2021-07-28 15:12:56 +05:30
issue . PullRequest . HeadCommitID = ""
2023-07-22 19:44:27 +05:30
pull_service . AddToTaskQueue ( ctx , issue . PullRequest )
2015-10-25 12:40:22 +05:30
}
2023-05-08 12:09:32 +05:30
// check whether the ref of PR <refs/pulls/pr_index/head> in base repo is consistent with the head commit of head branch in the head repo
// get head commit of PR
2023-08-19 14:59:34 +05:30
if pull . Flow == issues_model . PullRequestFlowGithub {
if err := pull . LoadBaseRepo ( ctx ) ; err != nil {
ctx . ServerError ( "Unable to load base repo" , err )
return
}
2024-02-17 20:00:41 +05:30
if err := pull . LoadHeadRepo ( ctx ) ; err != nil {
ctx . ServerError ( "Unable to load head repo" , err )
2023-08-19 14:59:34 +05:30
return
}
2023-05-08 12:09:32 +05:30
2024-02-17 20:00:41 +05:30
// Check if the base branch of the pull request still exists.
if ok := git . IsBranchExist ( ctx , pull . BaseRepo . RepoPath ( ) , pull . BaseBranch ) ; ! ok {
ctx . JSONError ( ctx . Tr ( "repo.pulls.reopen_failed.base_branch" ) )
2023-08-19 14:59:34 +05:30
return
}
2024-02-17 20:00:41 +05:30
// Check if the head branch of the pull request still exists.
if ok := git . IsBranchExist ( ctx , pull . HeadRepo . RepoPath ( ) , pull . HeadBranch ) ; ! ok {
ctx . JSONError ( ctx . Tr ( "repo.pulls.reopen_failed.head_branch" ) )
2023-08-19 14:59:34 +05:30
return
}
2024-02-17 20:00:41 +05:30
prHeadRef := pull . GetGitRefName ( )
prHeadCommitID , err := git . GetFullCommitID ( ctx , pull . BaseRepo . RepoPath ( ) , prHeadRef )
if err != nil {
ctx . ServerError ( "Get head commit Id of pr fail" , err )
return
}
2023-08-19 14:59:34 +05:30
headBranchRef := pull . GetGitHeadBranchRefName ( )
headBranchCommitID , err := git . GetFullCommitID ( ctx , pull . HeadRepo . RepoPath ( ) , headBranchRef )
if err != nil {
ctx . ServerError ( "Get head commit Id of head branch fail" , err )
return
}
2023-05-08 12:09:32 +05:30
2023-08-19 14:59:34 +05:30
err = pull . LoadIssue ( ctx )
2023-05-08 12:09:32 +05:30
if err != nil {
2023-08-19 14:59:34 +05:30
ctx . ServerError ( "load the issue of pull request error" , err )
2023-05-08 12:09:32 +05:30
return
}
2023-08-19 14:59:34 +05:30
if prHeadCommitID != headBranchCommitID {
// force push to base repo
err := git . Push ( ctx , pull . HeadRepo . RepoPath ( ) , git . PushOptions {
Remote : pull . BaseRepo . RepoPath ( ) ,
Branch : pull . HeadBranch + ":" + prHeadRef ,
Force : true ,
Env : repo_module . InternalPushingEnvironment ( pull . Issue . Poster , pull . BaseRepo ) ,
} )
if err != nil {
ctx . ServerError ( "force push error" , err )
return
}
}
2023-05-08 12:09:32 +05:30
}
2015-10-19 05:00:39 +05:30
}
if pr != nil {
ctx . Flash . Info ( ctx . Tr ( "repo.pulls.open_unmerged_pull_exists" , pr . Index ) )
2015-09-13 20:56:25 +05:30
} else {
2018-10-18 16:53:05 +05:30
isClosed := form . Status == "close"
2023-07-22 19:44:27 +05:30
if err := issue_service . ChangeStatus ( ctx , issue , ctx . Doer , "" , isClosed ) ; err != nil {
2019-04-02 13:18:31 +05:30
log . Error ( "ChangeStatus: %v" , err )
2018-07-18 02:53:58 +05:30
2022-06-13 15:07:59 +05:30
if issues_model . IsErrDependenciesLeft ( err ) {
2018-07-18 02:53:58 +05:30
if issue . IsPull {
2023-06-16 12:02:43 +05:30
ctx . JSONError ( ctx . Tr ( "repo.issues.dependency.pr_close_blocked" ) )
2018-07-18 02:53:58 +05:30
} else {
2023-06-16 12:02:43 +05:30
ctx . JSONError ( ctx . Tr ( "repo.issues.dependency.issue_close_blocked" ) )
2018-07-18 02:53:58 +05:30
}
return
}
2015-10-19 05:00:39 +05:30
} else {
2023-09-16 20:09:12 +05:30
if err := stopTimerIfAvailable ( ctx , ctx . Doer , issue ) ; err != nil {
2019-02-05 17:08:11 +05:30
ctx . ServerError ( "CreateOrStopIssueStopwatch" , err )
return
}
2016-02-22 23:10:00 +05:30
log . Trace ( "Issue [%d] status changed to closed: %v" , issue . ID , issue . IsClosed )
2015-10-19 05:00:39 +05:30
}
2015-09-13 20:56:25 +05:30
}
}
2015-10-19 05:00:39 +05:30
// Redirect to comment hashtag if there is any actual content.
typeName := "issues"
if issue . IsPull {
typeName = "pulls"
}
if comment != nil {
2023-06-16 12:02:43 +05:30
ctx . JSONRedirect ( fmt . Sprintf ( "%s/%s/%d#%s" , ctx . Repo . RepoLink , typeName , issue . Index , comment . HashTag ( ) ) )
2015-10-19 05:00:39 +05:30
} else {
2023-06-16 12:02:43 +05:30
ctx . JSONRedirect ( fmt . Sprintf ( "%s/%s/%d" , ctx . Repo . RepoLink , typeName , issue . Index ) )
2015-10-19 05:00:39 +05:30
}
2015-09-13 20:56:25 +05:30
} ( )
2015-08-13 13:37:11 +05:30
// Fix #321: Allow empty comments, as long as we have attachments.
if len ( form . Content ) == 0 && len ( attachments ) == 0 {
return
2014-07-26 11:58:04 +05:30
}
2022-12-10 08:16:31 +05:30
comment , err := issue_service . CreateIssueComment ( ctx , ctx . Doer , ctx . Repo . Repository , issue , form . Content , attachments )
2015-08-13 13:37:11 +05:30
if err != nil {
[MODERATION] User blocking
- Add the ability to block a user via their profile page.
- This will unstar their repositories and vice versa.
- Blocked users cannot create issues or pull requests on the doer's repositories (mind that this is not the case for organizations).
- Blocked users cannot comment on the doer's opened issues or pull requests.
- Blocked users cannot add reactions to doer's comments.
- Blocked users cannot cause a notification through mentioning the doer.
Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/540
(cherry picked from commit 687d852480388897db4d7b0cb397cf7135ab97b1)
(cherry picked from commit 0c32a4fde531018f74e01d9db6520895fcfa10cc)
(cherry picked from commit 1791130e3cb8470b9b39742e0004d5e4c7d1e64d)
(cherry picked from commit 37858b7e8fb6ba6c6ea0ac2562285b3b144efa19)
(cherry picked from commit a3e2bfd7e9eab82cc2c17061f6bb4e386a108c46)
(cherry picked from commit 7009b9fe87696b6182fab65ae82bf5a25cd39971)
Conflicts: https://codeberg.org/forgejo/forgejo/pulls/1014
routers/web/user/profile.go
templates/user/profile.tmpl
(cherry picked from commit b2aec3479177e725cfc7cbbb9d94753226928d1c)
(cherry picked from commit e2f1b73752f6bd3f830297d8f4ac438837471226)
[MODERATION] organization blocking a user (#802)
- Resolves #476
- Follow up for: #540
- Ensure that the doer and blocked person cannot follow each other.
- Ensure that the block person cannot watch doer's repositories.
- Add unblock button to the blocked user list.
- Add blocked since information to the blocked user list.
- Add extra testing to moderation code.
- Blocked user will unwatch doer's owned repository upon blocking.
- Add flash messages to let the user know the block/unblock action was successful.
- Add "You haven't blocked any users" message.
- Add organization blocking a user.
Co-authored-by: Gusted <postmaster@gusted.xyz>
Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/802
(cherry picked from commit 0505a1042197bd9136b58bc70ec7400a23471585)
(cherry picked from commit 37b4e6ef9b85e97d651cf350c9f3ea272ee8d76a)
(cherry picked from commit c17c121f2cf1f00e2a8d6fd6847705df47d0771e)
[MODERATION] organization blocking a user (#802) (squash)
Changes to adapt to:
6bbccdd177 Improve AJAX link and modal confirm dialog (#25210)
Refs: https://codeberg.org/forgejo/forgejo/pulls/882/files#issuecomment-945962
Refs: https://codeberg.org/forgejo/forgejo/pulls/882#issue-330561
(cherry picked from commit 523635f83cb2a1a4386769b79326088c5c4bbec7)
(cherry picked from commit 4743eaa6a0be0ef47de5b17c211dfe8bad1b7af9)
(cherry picked from commit eff5b43d2e843d5d537756d4fa58a8a010b6b527)
Conflicts: https://codeberg.org/forgejo/forgejo/pulls/1014
routers/web/user/profile.go
(cherry picked from commit 9d359be5ed11237088ccf6328571939af814984e)
(cherry picked from commit b1f3069a22a03734cffbfcd503ce004ba47561b7)
[MODERATION] add user blocking API
- Follow up for: #540, #802
- Add API routes for user blocking from user and organization
perspective.
- The new routes have integration testing.
- The new model functions have unit tests.
- Actually quite boring to write and to read this pull request.
(cherry picked from commit f3afaf15c7e34038363c9ce8e1ef957ec1e22b06)
(cherry picked from commit 6d754db3e5faff93a58fab2867737f81f40f6599)
(cherry picked from commit 2a89ddc0acffa9aea0f02b721934ef9e2b496a88)
(cherry picked from commit 4a147bff7e963ab9dffcfaefa5c2c01c59b4c732)
Conflicts:
routers/api/v1/api.go
templates/swagger/v1_json.tmpl
(cherry picked from commit bb8c33918569f65f25b014f0d7fe6ac20f9036fc)
(cherry picked from commit 5a11569a011b7d0a14391e2b5c07d0af825d7b0e)
(cherry picked from commit 2373c801ee6b84c368b498b16e6ad18650b38f42)
[MODERATION] restore redirect on unblock
ctx.RedirectToFirst(ctx.FormString("redirect_to"), ctx.ContextUser.HomeLink())
was replaced by
ctx.JSONOK()
in 128d77a3a Following up fixes for "Fix inconsistent user profile layout across tabs" (#25739)
thus changing the behavior (nicely spotted by the tests). This
restores it.
(cherry picked from commit 597c243707c3c86e7256faf1e6ba727224554de3)
(cherry picked from commit cfa539e590127b4b953b010fba3dea21c82a1714)
[MODERATION] Add test case (squash)
- Add an test case, to test an property of the function.
(cherry picked from commit 70dadb1916bfef8ba8cbc4e9b042cc8740f45e28)
[MODERATION] Block adding collaborators
- Ensure that the doer and blocked user cannot add each other as
collaborators to repositories.
- The Web UI gets an detailed message of the specific situation, the API
gets an generic Forbidden code.
- Unit tests has been added.
- Integration testing for Web and API has been added.
- This commit doesn't introduce removing each other as collaborators on
the block action, due to the complexity of database calls that needs to
be figured out. That deserves its own commit and test code.
(cherry picked from commit 747be949a1b3cd06f6586512f1af4630e55d7ad4)
[MODERATION] move locale_en-US.ini strings to avoid conflicts
Conflicts:
web_src/css/org.css
web_src/css/user.css
https://codeberg.org/forgejo/forgejo/pulls/1180
(cherry picked from commit e53f955c888ebaafc863a6e463da87f70f5605da)
Conflicts:
services/issue/comments.go
https://codeberg.org/forgejo/forgejo/pulls/1212
(cherry picked from commit b4a454b576eee0c7738b2f7df1acaf5bf7810d12)
Conflicts:
models/forgejo_migrations/migrate.go
options/locale/locale_en-US.ini
services/pull/pull.go
https://codeberg.org/forgejo/forgejo/pulls/1264
[MODERATION] Remove blocked user collaborations with doer
- When the doer blocks an user, who is also an collaborator on an
repository that the doer owns, remove that collaboration.
- Added unit tests.
- Refactor the unit test to be more organized.
(cherry picked from commit ec8701617830152680d69d50d64cb43cc2054a89)
(cherry picked from commit 313e6174d832501c57724ae7a6285194b7b81aab)
[MODERATION] QoL improvements (squash)
- Ensure that organisations cannot be blocked. It currently has no
effect, as all blocked operations cannot be executed from an
organisation standpoint.
- Refactored the API route to make use of the `UserAssignmentAPI`
middleware.
- Make more use of `t.Run` so that the test code is more clear about
which block of code belongs to which test case.
- Added more integration testing (to ensure the organisations cannot be
blocked and some authorization/permission checks).
(cherry picked from commit e9d638d0756ee20b6bf1eb999c988533a5066a68)
[MODERATION] s/{{avatar/{{ctx.AvatarUtils.Avatar/
(cherry picked from commit ce8b30be1327ab98df2ba061dd7e2a278b278c5b)
(cherry picked from commit f911dc402508b04cd5d5fb2f3332c2d640e4556e)
Conflicts:
options/locale/locale_en-US.ini
https://codeberg.org/forgejo/forgejo/pulls/1354
(cherry picked from commit c1b37b7fdaf06ee60da341dff76d703990c08082)
(cherry picked from commit 856a2e09036adf56d987c6eee364c431bc37fb2e)
[MODERATION] Show graceful error on comment creation
- When someone is blocked by the repository owner or issue poster and
try to comment on that issue, they get shown a graceful error.
- Adds integration test.
(cherry picked from commit 490646302e1e3dc3c59c9d75938b4647b6873ce7)
(cherry picked from commit d3d88667cbb928a6ff80658eba8ef0c6c508c9e0)
(cherry picked from commit 6818de13a921753e082b7c3d64c23917cc884e4b)
[MODERATION] Show graceful error on comment creation (squash) typo
(cherry picked from commit 1588d4834a37a744f092f2aeea6c9ef4795d7356)
(cherry picked from commit d510ea52d091503e841d66f2f604348add8b4535)
(cherry picked from commit 8249e93a14f628bb0e89fe3be678e4966539944e)
[MODERATION] Refactor integration testing (squash)
- Motivation for this PR is that I'd noticed that a lot of repeated
calls are happening between the test functions and that certain tests
weren't using helper functions like `GetCSRF`, therefor this refactor of
the integration tests to keep it: clean, small and hopefully more
maintainable and understandable.
- There are now three integration tests: `TestBlockUser`,
`TestBlockUserFromOrganization` and `TestBlockActions` (and has been
moved in that order in the source code).
- `TestBlockUser` is for doing blocking related actions as an user and
`TestBlockUserFromOrganization` as an organisation, even though they
execute the same kind of tests they do not share any database calls or
logic and therefor it currently doesn't make sense to merge them
together (hopefully such oppurtinutiy might be presented in the future).
- `TestBlockActions` now contain all tests for actions that should be
blocked after blocking has happened, most tests now share the same doer
and blocked users and a extra fixture has been added to make this
possible for the comment test.
- Less code, more comments and more re-use between tests.
(cherry picked from commit ffb393213d2f1269aad3c019d039cf60d0fe4b10)
(cherry picked from commit 85505e0f815fede589c272d301c95204f9596985)
(cherry picked from commit 0f3cf17761f6caedb17550f69de96990c2090af1)
[MODERATION] Fix network error (squash)
- Fix network error toast messages on user actions such as follow and
unfollow. This happened because the javascript code now expects an JSON
to be returned, but this wasn't the case due to
cfa539e590127b4953b010fba3dea21c82a1714.
- The integration testing has been adjusted to instead test for the
returned flash cookie.
(cherry picked from commit 112bc25e548d317a4ee00f9efa9068794a733e3b)
(cherry picked from commit 1194fe4899eb39dcb9a2410032ad0cc67a62b92b)
(cherry picked from commit 9abb95a8441e227874fe156095349a3173cc5a81)
[MODERATION] Modernize frontend (squash)
- Unify blocked users list.
- Use the new flex list classes for blocked users list to avoid using
the CSS helper classes and thereby be consistent in the design.
- Fix the modal by using the new modal class.
- Remove the icon in the modal as looks too big in the new design.
- Fix avatar not displaying as it was passing the context where the user
should've been passed.
- Don't use italics for 'Blocked since' text.
- Use namelink template to display the user's name and homelink.
(cherry picked from commit ec935a16a319b14e819ead828d1d9875280d9259)
(cherry picked from commit 67f37c83461aa393c53a799918e9708cb9b89b30)
Conflicts:
models/user/follow.go
models/user/user_test.go
routers/api/v1/user/follower.go
routers/web/shared/user/header.go
routers/web/user/profile.go
templates/swagger/v1_json.tmpl
https://codeberg.org/forgejo/forgejo/pulls/1468
(cherry picked from commit 6a9626839c6342cd2767ea12757ee2f78eaf443b)
Conflicts:
tests/integration/api_nodeinfo_test.go
https://codeberg.org/forgejo/forgejo/pulls/1508#issuecomment-1242385
(cherry picked from commit 7378b251b481ed1e60e816caf8f649e8397ee5fc)
Conflicts:
models/fixtures/watch.yml
models/issues/reaction.go
models/issues/reaction_test.go
routers/api/v1/repo/issue_reaction.go
routers/web/repo/issue.go
services/issue/issue.go
https://codeberg.org/forgejo/forgejo/pulls/1547
(cherry picked from commit c2028930c101223820de0bbafc318e9394c347b8)
(cherry picked from commit d3f9134aeeef784586e8412e8dbba0a8fceb0cd4)
(cherry picked from commit 7afe154c5c40bcc65accdf51c9224b2f7627a684)
(cherry picked from commit 99ac7353eb1e834a77fe42aa89208791cc2364ff)
(cherry picked from commit a9cde00c5c25ea8c427967cb7ab57abb618e44cb)
Conflicts:
services/user/delete.go
https://codeberg.org/forgejo/forgejo/pulls/1736
(cherry picked from commit 008c0cc63d1a3b8eb694bffbf77a7b25c56afd57)
[DEADCODE] add exceptions
(cherry picked from commit 12ddd2b10e3309f6430b0af42855c6af832832ee)
[MODERATION] Remove deadcode (squash)
- Remove deadcode that's no longer used by Forgejo.
(cherry picked from commit 0faeab4fa9b0aa59f86760b24ecbc07815026c82)
[MODERATION] Add repo transfers to blocked functionality (squash)
- When someone gets blocked, remove all pending repository transfers
from the blocked user to the doer.
- Do not allow to start transferring repositories to the doer as blocked user.
- Added unit testing.
- Added integration testing.
(cherry picked from commit 8a3caac33013482ddbee2fa51510c6918ba54466)
(cherry picked from commit a92b4cfeb63b90eb2d90d0feb51cec62e0502d84)
(cherry picked from commit acaaaf07d999974dbe5f9c5e792621c597bfb542)
(cherry picked from commit 735818863c1793aa6f6983afedc4bd3b36026ca5)
(cherry picked from commit f50fa43b32160d0d88eca1dbdca09b5f575fb62b)
(cherry picked from commit e16683643388fb3c60ea478f1419a6af4f4aa283)
(cherry picked from commit 82a0e4a3814a66ce44be6a031bdf08484586c61b)
(cherry picked from commit ff233c19c4a5edcc2b99a6f41a2d19dbe8c08b3b)
(cherry picked from commit 8ad87d215f2b6adb978de77e53ba2bf7ea571430)
[MODERATION] Fix unblock action (squash)
- Pass the whole context instead of only giving pieces.
- This fixes CSRF not correctly being inserted into the unblock buttons.
(cherry picked from commit 2aa51922ba6a0ea2f8644277baa74fc8f34ab95a)
(cherry picked from commit 7ee8db0f018340bc97f125415503e3e5db5f5082)
(cherry picked from commit e4f8b999bcd3b68b3ef7f54f5b17c3ada0308121)
(cherry picked from commit 05aea60b1302bbd3ea574a9c6c34e1005a5d73bf)
(cherry picked from commit dc0d61b012cfaf2385f71e97cda5f220b58b9fa4)
(cherry picked from commit f53fa583de671ff60a0a1d0f3ab8c260e1ba4e1f)
(cherry picked from commit c65b89a58d11b32009c710c2f5e75f0cd3539395)
(cherry picked from commit 69e50b9969db3ab71cefaed520757876a9629a5c)
(cherry picked from commit ec127440b86cb5fcf51799d8bd76a9fd6b9cebcc)
[MODERATION] cope with shared fixtures
* There is one more issue in the fixtures and this breaks some tests
* The users in the shared fixtures were renamed for clarity and that
breaks some tests
(cherry picked from commit 707a4edbdf67d0eb168d7bb430cf85dd8cd63c52)
Conflicts:
modules/indexer/issues/indexer_test.go
https://codeberg.org/forgejo/forgejo/pulls/1508
(cherry picked from commit 82cc044366c749df80ffad44eed2988b8e64211e)
(cherry picked from commit 2776aec7e85850f1d7f01a090a72491550fb9d29)
(cherry picked from commit 1fbde36dc784b5b2cc6193f02ff0d436b0f2a629)
(cherry picked from commit 1293db3c4e5df218501f5add9f9d41101ffcb8aa)
(cherry picked from commit 6476802175bac3ef78dd8f24ff6bebc16f398a78)
(cherry picked from commit 5740f2fc830356acb7929a02fe304008b94a0ca5)
(cherry picked from commit afc12d7b6e9b773fa89718aa79cd95c0e0ce4406)
[MODERATION] Fix transfer confirmation (squash)
- Fix problem caused by the clearer confirmation for dangerous actions commit.
(cherry picked from commit 3488f4a9cb1f7f73103ae0017d644f13ca3ab798)
(cherry picked from commit ed7de91f6ace23a1459bc6552edf719d62c7c941)
(cherry picked from commit 2d97929b9b7b8d979eb12bf0994d3f169d41f7fd)
(cherry picked from commit 50d035a7b058b9c4486c38cd4be0b02a4e1bf4d9)
(cherry picked from commit 0a0c07d78a1dee3489b97ab359bb957e3f7fb94b)
(cherry picked from commit 85e55c4dbc2f513f3d5254dac20915e8c3c22886)
(cherry picked from commit d8282122ad6e8b497de35d1ed89e3093a2cd5ee2)
(cherry picked from commit 3f0b3b6cc582c3d672d371dd9fe1203a56cb88c0)
[MODERATION] Purge issues on user deletion (squash)
(cherry picked from commit 4f529d9596ffbfc4e754c28830ba028f6344dc5b)
(cherry picked from commit f0e3acadd321fcb99e8ea3e3ce1c69df25c4ca4d)
(cherry picked from commit 682c4effe69dc0d4ed304fa7ce6259d9ce573629)
(cherry picked from commit e43c2d84fd4b6fd31e2370cec1034262d12e5c34)
(cherry picked from commit 9c8e53ccc78053026e4f667889959c23c8d95934)
(cherry picked from commit a9eb7ac783b2c16ee3702a88203bf857cb4147fc)
[MODERATION] Purge issues on user deletion (squash) revert shared fixtures workarounds
(cherry picked from commit 7224653a40e32186892e89bfedd49edecf5b8f81)
(cherry picked from commit aa6e8672f9473a9100e7575051dec9eda37709a0)
(cherry picked from commit 58c7947e95648f50237ddcd46b6bd025b224a70f)
(cherry picked from commit f1aacb1851b232082febcd7870a40a56de3855a6)
(cherry picked from commit 0bf174af87f7de9a8d869304f709e2bf41f3dde9)
(cherry picked from commit f9706f4335df3b7688ed60853d917efa72fb464a)
[MODERATION] Prepare moderation for context locale changes (squash)
- Resolves https://codeberg.org/forgejo/forgejo/issues/1711
(cherry picked from commit 2e289baea943dcece88f02d110b03d344308a261)
(cherry picked from commit 97b16bc19ae680db62608d6020b00fe5ac451c60)
[MODERATION] User blocking (squash) do not use shared fixture
It conflicts with a fixtured added in the commit
Fix comment permissions (#28213) (#28216)
(cherry picked from commit ab40799dcab24e9f495d765268b791931da81684)
(cherry picked from commit 996c92cafdb5b33a6d2d05d94038e950d97eb7de)
(cherry picked from commit 259912e3a69071c5ad57871464d0b79f69a8e72c)
Conflicts:
options/locale/locale_en-US.ini
https://codeberg.org/forgejo/forgejo/pulls/1921
(cherry picked from commit 1e82abc032c18015b92c93a7617a5dd06d50bd2d)
(cherry picked from commit a176fee1607d571b25b345184f1c50d403029610)
(cherry picked from commit 0480b76dfeda968849e900da9454a3efd82590fa)
(cherry picked from commit 4bc06b7b3841c74e3d790b1ef635c2b382ca7123)
(cherry picked from commit 073094cf722a927a623408d66537c758d7d64e4c)
(cherry picked from commit ac6201c647a4d3a2cfb2b0303b851a8fe7a29444)
(cherry picked from commit 7e0812674da3fbd1e96bdda820962edad6826fbd)
(cherry picked from commit 068c741e5696957710b3d1c2e18c00be2ffaa278)
Conflicts:
models/repo_transfer.go
models/repo_transfer_test.go
routers/web/user/profile.go
https://codeberg.org/forgejo/forgejo/pulls/2298
2023-08-15 04:37:38 +05:30
if errors . Is ( err , user_model . ErrBlockedByUser ) {
ctx . Flash . Error ( ctx . Tr ( "repo.issues.comment.blocked_by_user" ) )
} else {
ctx . ServerError ( "CreateIssueComment" , err )
}
2014-07-26 11:58:04 +05:30
return
}
2015-08-13 13:37:11 +05:30
log . Trace ( "Comment created: %d/%d/%d" , ctx . Repo . Repository . ID , issue . ID , comment . ID )
2014-07-26 11:58:04 +05:30
}
2016-11-24 12:34:31 +05:30
// UpdateCommentContent change comment of issue's content
2016-03-11 22:26:52 +05:30
func UpdateCommentContent ( ctx * context . Context ) {
2022-06-13 15:07:59 +05:30
comment , err := issues_model . GetCommentByID ( ctx , ctx . ParamsInt64 ( ":id" ) )
2015-08-20 02:01:28 +05:30
if err != nil {
2022-06-13 15:07:59 +05:30
ctx . NotFoundOrServerError ( "GetCommentByID" , issues_model . IsErrCommentNotExist , err )
2015-08-20 02:01:28 +05:30
return
}
2022-11-19 13:42:33 +05:30
if err := comment . LoadIssue ( ctx ) ; err != nil {
2022-06-13 15:07:59 +05:30
ctx . NotFoundOrServerError ( "LoadIssue" , issues_model . IsErrIssueNotExist , err )
2018-11-28 16:56:14 +05:30
return
}
2023-11-25 22:51:21 +05:30
if comment . Issue . RepoID != ctx . Repo . Repository . ID {
ctx . NotFound ( "CompareRepoID" , issues_model . ErrCommentNotExist { } )
return
}
2022-03-22 12:33:22 +05:30
if ! ctx . IsSigned || ( ctx . Doer . ID != comment . PosterID && ! ctx . Repo . CanWriteIssuesOrPulls ( comment . Issue . IsPull ) ) {
2021-04-05 21:00:52 +05:30
ctx . Error ( http . StatusForbidden )
2015-08-20 02:01:28 +05:30
return
2022-01-18 22:58:38 +05:30
}
2023-04-20 12:09:44 +05:30
if ! comment . Type . HasContentSupport ( ) {
2021-04-05 21:00:52 +05:30
ctx . Error ( http . StatusNoContent )
2015-08-20 02:01:28 +05:30
return
}
2018-05-16 19:31:55 +05:30
oldContent := comment . Content
2024-05-27 21:04:18 +05:30
newContent := ctx . FormString ( "content" )
contentVersion := ctx . FormInt ( "content_version" )
comment . Content = newContent
if err = issue_service . UpdateComment ( ctx , comment , contentVersion , ctx . Doer , oldContent ) ; err != nil {
if errors . Is ( err , issues_model . ErrCommentAlreadyChanged ) {
ctx . JSONError ( ctx . Tr ( "repo.comments.edit.already_changed" ) )
} else {
ctx . ServerError ( "UpdateComment" , err )
}
2015-08-20 02:01:28 +05:30
return
}
2022-11-19 13:42:33 +05:30
if err := comment . LoadAttachments ( ctx ) ; err != nil {
2022-01-18 22:58:38 +05:30
ctx . ServerError ( "LoadAttachments" , err )
return
2021-08-21 00:56:19 +05:30
}
2021-08-21 18:34:47 +05:30
// when the update request doesn't intend to update attachments (eg: change checkbox state), ignore attachment updates
if ! ctx . FormBool ( "ignore_attachments" ) {
2022-05-20 19:38:52 +05:30
if err := updateAttachments ( ctx , comment , ctx . FormStrings ( "files[]" ) ) ; err != nil {
2021-08-21 18:34:47 +05:30
ctx . ServerError ( "UpdateAttachments" , err )
return
}
2021-04-20 03:55:08 +05:30
}
content , err := markdown . RenderString ( & markup . RenderContext {
2024-01-15 14:19:24 +05:30
Links : markup . Links {
Base : ctx . FormString ( "context" ) , // FIXME: <- IS THIS SAFE ?
} ,
Metas : ctx . Repo . Repository . ComposeMetas ( ctx ) ,
GitRepo : ctx . Repo . GitRepo ,
Ctx : ctx ,
2021-04-20 03:55:08 +05:30
} , comment . Content )
if err != nil {
ctx . ServerError ( "RenderString" , err )
return
2019-10-15 17:49:32 +05:30
}
2023-07-05 00:06:08 +05:30
ctx . JSON ( http . StatusOK , map [ string ] any {
2024-05-27 21:04:18 +05:30
"content" : content ,
"contentVersion" : comment . ContentVersion ,
"attachments" : attachmentsHTML ( ctx , comment . Attachments , comment . Content ) ,
2015-08-20 02:01:28 +05:30
} )
}
2016-11-24 12:34:31 +05:30
// DeleteComment delete comment of issue
2016-07-26 00:18:17 +05:30
func DeleteComment ( ctx * context . Context ) {
2022-06-13 15:07:59 +05:30
comment , err := issues_model . GetCommentByID ( ctx , ctx . ParamsInt64 ( ":id" ) )
2016-07-26 00:18:17 +05:30
if err != nil {
2022-06-13 15:07:59 +05:30
ctx . NotFoundOrServerError ( "GetCommentByID" , issues_model . IsErrCommentNotExist , err )
2016-07-26 00:18:17 +05:30
return
}
2022-11-19 13:42:33 +05:30
if err := comment . LoadIssue ( ctx ) ; err != nil {
2022-06-13 15:07:59 +05:30
ctx . NotFoundOrServerError ( "LoadIssue" , issues_model . IsErrIssueNotExist , err )
2018-11-28 16:56:14 +05:30
return
}
2023-11-25 22:51:21 +05:30
if comment . Issue . RepoID != ctx . Repo . Repository . ID {
ctx . NotFound ( "CompareRepoID" , issues_model . ErrCommentNotExist { } )
return
}
2022-03-22 12:33:22 +05:30
if ! ctx . IsSigned || ( ctx . Doer . ID != comment . PosterID && ! ctx . Repo . CanWriteIssuesOrPulls ( comment . Issue . IsPull ) ) {
2021-04-05 21:00:52 +05:30
ctx . Error ( http . StatusForbidden )
2016-07-26 00:18:17 +05:30
return
2023-04-20 12:09:44 +05:30
} else if ! comment . Type . HasContentSupport ( ) {
2021-04-05 21:00:52 +05:30
ctx . Error ( http . StatusNoContent )
2016-07-26 00:18:17 +05:30
return
}
2022-12-10 08:16:31 +05:30
if err = issue_service . DeleteComment ( ctx , ctx . Doer , comment ) ; err != nil {
2022-11-19 13:42:33 +05:30
ctx . ServerError ( "DeleteComment" , err )
2016-07-26 00:18:17 +05:30
return
}
2022-03-23 10:24:07 +05:30
ctx . Status ( http . StatusOK )
2016-07-26 00:18:17 +05:30
}
2017-12-04 04:44:26 +05:30
// ChangeIssueReaction create a reaction for issue
2021-01-26 21:06:53 +05:30
func ChangeIssueReaction ( ctx * context . Context ) {
2021-04-07 01:14:05 +05:30
form := web . GetForm ( ctx ) . ( * forms . ReactionForm )
2017-12-04 04:44:26 +05:30
issue := GetActionIssue ( ctx )
if ctx . Written ( ) {
return
}
2022-03-22 12:33:22 +05:30
if ! ctx . IsSigned || ( ctx . Doer . ID != issue . PosterID && ! ctx . Repo . CanReadIssuesOrPulls ( issue . IsPull ) ) {
2019-04-23 02:10:51 +05:30
if log . IsTrace ( ) {
if ctx . IsSigned {
issueType := "issues"
if issue . IsPull {
issueType = "pulls"
}
log . Trace ( "Permission Denied: User %-v not the Poster (ID: %d) and cannot read %s in Repo %-v.\n" +
"User in Repo has Permissions: %-+v" ,
2022-03-22 12:33:22 +05:30
ctx . Doer ,
Rewrite logger system (#24726)
## ⚠️ Breaking
The `log.<mode>.<logger>` style config has been dropped. If you used it,
please check the new config manual & app.example.ini to make your
instance output logs as expected.
Although many legacy options still work, it's encouraged to upgrade to
the new options.
The SMTP logger is deleted because SMTP is not suitable to collect logs.
If you have manually configured Gitea log options, please confirm the
logger system works as expected after upgrading.
## Description
Close #12082 and maybe more log-related issues, resolve some related
FIXMEs in old code (which seems unfixable before)
Just like rewriting queue #24505 : make code maintainable, clear legacy
bugs, and add the ability to support more writers (eg: JSON, structured
log)
There is a new document (with examples): `logging-config.en-us.md`
This PR is safer than the queue rewriting, because it's just for
logging, it won't break other logic.
## The old problems
The logging system is quite old and difficult to maintain:
* Unclear concepts: Logger, NamedLogger, MultiChannelledLogger,
SubLogger, EventLogger, WriterLogger etc
* Some code is difficult to know whether it is right:
`log.DelNamedLogger("console")` vs `log.DelNamedLogger(log.DEFAULT)` vs
`log.DelLogger("console")`
* The old system heavily depends on ini config system, it's difficult to
create new logger for different purpose, and it's very fragile.
* The "color" trick is difficult to use and read, many colors are
unnecessary, and in the future structured log could help
* It's difficult to add other log formats, eg: JSON format
* The log outputer doesn't have full control of its goroutine, it's
difficult to make outputer have advanced behaviors
* The logs could be lost in some cases: eg: no Fatal error when using
CLI.
* Config options are passed by JSON, which is quite fragile.
* INI package makes the KEY in `[log]` section visible in `[log.sub1]`
and `[log.sub1.subA]`, this behavior is quite fragile and would cause
more unclear problems, and there is no strong requirement to support
`log.<mode>.<logger>` syntax.
## The new design
See `logger.go` for documents.
## Screenshot
<details>
![image](https://github.com/go-gitea/gitea/assets/2114189/4462d713-ba39-41f5-bb08-de912e67e1ff)
![image](https://github.com/go-gitea/gitea/assets/2114189/b188035e-f691-428b-8b2d-ff7b2199b2f9)
![image](https://github.com/go-gitea/gitea/assets/2114189/132e9745-1c3b-4e00-9e0d-15eaea495dee)
</details>
## TODO
* [x] add some new tests
* [x] fix some tests
* [x] test some sub-commands (manually ....)
---------
Co-authored-by: Jason Song <i@wolfogre.com>
Co-authored-by: delvh <dev.lh@web.de>
Co-authored-by: Giteabot <teabot@gitea.io>
2023-05-22 04:05:11 +05:30
issue . PosterID ,
2019-04-23 02:10:51 +05:30
issueType ,
ctx . Repo . Repository ,
ctx . Repo . Permission )
} else {
log . Trace ( "Permission Denied: Not logged in" )
}
}
2021-04-05 21:00:52 +05:30
ctx . Error ( http . StatusForbidden )
2018-11-28 16:56:14 +05:30
return
}
2017-12-04 04:44:26 +05:30
if ctx . HasError ( ) {
2018-01-11 03:04:17 +05:30
ctx . ServerError ( "ChangeIssueReaction" , errors . New ( ctx . GetErrMsg ( ) ) )
2017-12-04 04:44:26 +05:30
return
}
switch ctx . Params ( ":action" ) {
case "react" :
[MODERATION] User blocking
- Add the ability to block a user via their profile page.
- This will unstar their repositories and visa versa.
- Blocked users cannot create issues or pull requests on your the doer's repositories (mind that this is not the case for organizations).
- Blocked users cannot comment on the doer's opened issues or pull requests.
- Blocked users cannot add reactions to doer's comments.
- Blocked users cannot cause a notification trough mentioning the doer.
Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/540
(cherry picked from commit 687d852480388897db4d7b0cb397cf7135ab97b1)
(cherry picked from commit 0c32a4fde531018f74e01d9db6520895fcfa10cc)
(cherry picked from commit 1791130e3cb8470b9b39742e0004d5e4c7d1e64d)
(cherry picked from commit 37858b7e8fb6ba6c6ea0ac2562285b3b144efa19)
(cherry picked from commit a3e2bfd7e9eab82cc2c17061f6bb4e386a108c46)
(cherry picked from commit 7009b9fe87696b6182fab65ae82bf5a25cd39971)
Conflicts: https://codeberg.org/forgejo/forgejo/pulls/1014
routers/web/user/profile.go
templates/user/profile.tmpl
(cherry picked from commit b2aec3479177e725cfc7cbbb9d94753226928d1c)
(cherry picked from commit e2f1b73752f6bd3f830297d8f4ac438837471226)
[MODERATION] organization blocking a user (#802)
- Resolves #476
- Follow up for: #540
- Ensure that the doer and blocked person cannot follow each other.
- Ensure that the block person cannot watch doer's repositories.
- Add unblock button to the blocked user list.
- Add blocked since information to the blocked user list.
- Add extra testing to moderation code.
- Blocked user will unwatch doer's owned repository upon blocking.
- Add flash messages to let the user know the block/unblock action was successful.
- Add "You haven't blocked any users" message.
- Add organization blocking a user.
Co-authored-by: Gusted <postmaster@gusted.xyz>
Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/802
(cherry picked from commit 0505a1042197bd9136b58bc70ec7400a23471585)
(cherry picked from commit 37b4e6ef9b85e97d651cf350c9f3ea272ee8d76a)
(cherry picked from commit c17c121f2cf1f00e2a8d6fd6847705df47d0771e)
[MODERATION] organization blocking a user (#802) (squash)
Changes to adapt to:
6bbccdd177 Improve AJAX link and modal confirm dialog (#25210)
Refs: https://codeberg.org/forgejo/forgejo/pulls/882/files#issuecomment-945962
Refs: https://codeberg.org/forgejo/forgejo/pulls/882#issue-330561
(cherry picked from commit 523635f83cb2a1a4386769b79326088c5c4bbec7)
(cherry picked from commit 4743eaa6a0be0ef47de5b17c211dfe8bad1b7af9)
(cherry picked from commit eff5b43d2e843d5d537756d4fa58a8a010b6b527)
Conflicts: https://codeberg.org/forgejo/forgejo/pulls/1014
routers/web/user/profile.go
(cherry picked from commit 9d359be5ed11237088ccf6328571939af814984e)
(cherry picked from commit b1f3069a22a03734cffbfcd503ce004ba47561b7)
[MODERATION] add user blocking API
- Follow up for: #540, #802
- Add API routes for user blocking from user and organization
perspective.
- The new routes have integration testing.
- The new model functions have unit tests.
- Actually quite boring to write and to read this pull request.
(cherry picked from commit f3afaf15c7e34038363c9ce8e1ef957ec1e22b06)
(cherry picked from commit 6d754db3e5faff93a58fab2867737f81f40f6599)
(cherry picked from commit 2a89ddc0acffa9aea0f02b721934ef9e2b496a88)
(cherry picked from commit 4a147bff7e963ab9dffcfaefa5c2c01c59b4c732)
Conflicts:
routers/api/v1/api.go
templates/swagger/v1_json.tmpl
(cherry picked from commit bb8c33918569f65f25b014f0d7fe6ac20f9036fc)
(cherry picked from commit 5a11569a011b7d0a14391e2b5c07d0af825d7b0e)
(cherry picked from commit 2373c801ee6b84c368b498b16e6ad18650b38f42)
[MODERATION] restore redirect on unblock
ctx.RedirectToFirst(ctx.FormString("redirect_to"), ctx.ContextUser.HomeLink())
was replaced by
ctx.JSONOK()
in 128d77a3a Following up fixes for "Fix inconsistent user profile layout across tabs" (#25739)
thus changing the behavior (nicely spotted by the tests). This
restores it.
(cherry picked from commit 597c243707c3c86e7256faf1e6ba727224554de3)
(cherry picked from commit cfa539e590127b4b953b010fba3dea21c82a1714)
[MODERATION] Add test case (squash)
- Add a test case, to test a property of the function.
(cherry picked from commit 70dadb1916bfef8ba8cbc4e9b042cc8740f45e28)
[MODERATION] Block adding collaborators
- Ensure that the doer and blocked user cannot add each other as
collaborators to repositories.
- The Web UI gets a detailed message of the specific situation, the API
gets a generic Forbidden code.
- Unit tests has been added.
- Integration testing for Web and API has been added.
- This commit doesn't introduce removing each other as collaborators on
the block action, due to the complexity of database calls that needs to
be figured out. That deserves its own commit and test code.
(cherry picked from commit 747be949a1b3cd06f6586512f1af4630e55d7ad4)
[MODERATION] move locale_en-US.ini strings to avoid conflicts
Conflicts:
web_src/css/org.css
web_src/css/user.css
https://codeberg.org/forgejo/forgejo/pulls/1180
(cherry picked from commit e53f955c888ebaafc863a6e463da87f70f5605da)
Conflicts:
services/issue/comments.go
https://codeberg.org/forgejo/forgejo/pulls/1212
(cherry picked from commit b4a454b576eee0c7738b2f7df1acaf5bf7810d12)
Conflicts:
models/forgejo_migrations/migrate.go
options/locale/locale_en-US.ini
services/pull/pull.go
https://codeberg.org/forgejo/forgejo/pulls/1264
[MODERATION] Remove blocked user collaborations with doer
- When the doer blocks a user, who is also a collaborator on a
repository that the doer owns, remove that collaboration.
- Added unit tests.
- Refactor the unit test to be more organized.
(cherry picked from commit ec8701617830152680d69d50d64cb43cc2054a89)
(cherry picked from commit 313e6174d832501c57724ae7a6285194b7b81aab)
[MODERATION] QoL improvements (squash)
- Ensure that organisations cannot be blocked. It currently has no
effect, as all blocked operations cannot be executed from an
organisation standpoint.
- Refactored the API route to make use of the `UserAssignmentAPI`
middleware.
- Make more use of `t.Run` so that the test code is more clear about
which block of code belongs to which test case.
- Added more integration testing (to ensure the organisations cannot be
blocked and some authorization/permission checks).
(cherry picked from commit e9d638d0756ee20b6bf1eb999c988533a5066a68)
[MODERATION] s/{{avatar/{{ctx.AvatarUtils.Avatar/
(cherry picked from commit ce8b30be1327ab98df2ba061dd7e2a278b278c5b)
(cherry picked from commit f911dc402508b04cd5d5fb2f3332c2d640e4556e)
Conflicts:
options/locale/locale_en-US.ini
https://codeberg.org/forgejo/forgejo/pulls/1354
(cherry picked from commit c1b37b7fdaf06ee60da341dff76d703990c08082)
(cherry picked from commit 856a2e09036adf56d987c6eee364c431bc37fb2e)
[MODERATION] Show graceful error on comment creation
- When someone is blocked by the repository owner or issue poster and
try to comment on that issue, they get shown a graceful error.
- Adds integration test.
(cherry picked from commit 490646302e1e3dc3c59c9d75938b4647b6873ce7)
(cherry picked from commit d3d88667cbb928a6ff80658eba8ef0c6c508c9e0)
(cherry picked from commit 6818de13a921753e082b7c3d64c23917cc884e4b)
[MODERATION] Show graceful error on comment creation (squash) typo
(cherry picked from commit 1588d4834a37a744f092f2aeea6c9ef4795d7356)
(cherry picked from commit d510ea52d091503e841d66f2f604348add8b4535)
(cherry picked from commit 8249e93a14f628bb0e89fe3be678e4966539944e)
[MODERATION] Refactor integration testing (squash)
- Motivation for this PR is that I'd noticed that a lot of repeated
calls are happening between the test functions and that certain tests
weren't using helper functions like `GetCSRF`, therefore this refactor of
the integration tests to keep it: clean, small and hopefully more
maintainable and understandable.
- There are now three integration tests: `TestBlockUser`,
`TestBlockUserFromOrganization` and `TestBlockActions` (and has been
moved in that order in the source code).
- `TestBlockUser` is for doing blocking related actions as a user and
`TestBlockUserFromOrganization` as an organisation, even though they
execute the same kind of tests they do not share any database calls or
logic and therefore it currently doesn't make sense to merge them
together (hopefully such opportunity might be presented in the future).
- `TestBlockActions` now contains all tests for actions that should be
blocked after blocking has happened, most tests now share the same doer
and blocked users and an extra fixture has been added to make this
possible for the comment test.
- Less code, more comments and more re-use between tests.
(cherry picked from commit ffb393213d2f1269aad3c019d039cf60d0fe4b10)
(cherry picked from commit 85505e0f815fede589c272d301c95204f9596985)
(cherry picked from commit 0f3cf17761f6caedb17550f69de96990c2090af1)
[MODERATION] Fix network error (squash)
- Fix network error toast messages on user actions such as follow and
unfollow. This happened because the javascript code now expects JSON
to be returned, but this wasn't the case due to
cfa539e590127b4b953b010fba3dea21c82a1714.
- The integration testing has been adjusted to instead test for the
returned flash cookie.
(cherry picked from commit 112bc25e548d317a4ee00f9efa9068794a733e3b)
(cherry picked from commit 1194fe4899eb39dcb9a2410032ad0cc67a62b92b)
(cherry picked from commit 9abb95a8441e227874fe156095349a3173cc5a81)
[MODERATION] Modernize frontend (squash)
- Unify blocked users list.
- Use the new flex list classes for blocked users list to avoid using
the CSS helper classes and thereby be consistent in the design.
- Fix the modal by using the new modal class.
- Remove the icon in the modal as looks too big in the new design.
- Fix avatar not displaying as it was passing the context where the user
should've been passed.
- Don't use italics for 'Blocked since' text.
- Use namelink template to display the user's name and homelink.
(cherry picked from commit ec935a16a319b14e819ead828d1d9875280d9259)
(cherry picked from commit 67f37c83461aa393c53a799918e9708cb9b89b30)
Conflicts:
models/user/follow.go
models/user/user_test.go
routers/api/v1/user/follower.go
routers/web/shared/user/header.go
routers/web/user/profile.go
templates/swagger/v1_json.tmpl
https://codeberg.org/forgejo/forgejo/pulls/1468
(cherry picked from commit 6a9626839c6342cd2767ea12757ee2f78eaf443b)
Conflicts:
tests/integration/api_nodeinfo_test.go
https://codeberg.org/forgejo/forgejo/pulls/1508#issuecomment-1242385
(cherry picked from commit 7378b251b481ed1e60e816caf8f649e8397ee5fc)
Conflicts:
models/fixtures/watch.yml
models/issues/reaction.go
models/issues/reaction_test.go
routers/api/v1/repo/issue_reaction.go
routers/web/repo/issue.go
services/issue/issue.go
https://codeberg.org/forgejo/forgejo/pulls/1547
(cherry picked from commit c2028930c101223820de0bbafc318e9394c347b8)
(cherry picked from commit d3f9134aeeef784586e8412e8dbba0a8fceb0cd4)
(cherry picked from commit 7afe154c5c40bcc65accdf51c9224b2f7627a684)
(cherry picked from commit 99ac7353eb1e834a77fe42aa89208791cc2364ff)
(cherry picked from commit a9cde00c5c25ea8c427967cb7ab57abb618e44cb)
Conflicts:
services/user/delete.go
https://codeberg.org/forgejo/forgejo/pulls/1736
(cherry picked from commit 008c0cc63d1a3b8eb694bffbf77a7b25c56afd57)
[DEADCODE] add exceptions
(cherry picked from commit 12ddd2b10e3309f6430b0af42855c6af832832ee)
[MODERATION] Remove deadcode (squash)
- Remove deadcode that's no longer used by Forgejo.
(cherry picked from commit 0faeab4fa9b0aa59f86760b24ecbc07815026c82)
[MODERATION] Add repo transfers to blocked functionality (squash)
- When someone gets blocked, remove all pending repository transfers
from the blocked user to the doer.
- Do not allow to start transferring repositories to the doer as blocked user.
- Added unit testing.
- Added integration testing.
(cherry picked from commit 8a3caac33013482ddbee2fa51510c6918ba54466)
(cherry picked from commit a92b4cfeb63b90eb2d90d0feb51cec62e0502d84)
(cherry picked from commit acaaaf07d999974dbe5f9c5e792621c597bfb542)
(cherry picked from commit 735818863c1793aa6f6983afedc4bd3b36026ca5)
(cherry picked from commit f50fa43b32160d0d88eca1dbdca09b5f575fb62b)
(cherry picked from commit e16683643388fb3c60ea478f1419a6af4f4aa283)
(cherry picked from commit 82a0e4a3814a66ce44be6a031bdf08484586c61b)
(cherry picked from commit ff233c19c4a5edcc2b99a6f41a2d19dbe8c08b3b)
(cherry picked from commit 8ad87d215f2b6adb978de77e53ba2bf7ea571430)
[MODERATION] Fix unblock action (squash)
- Pass the whole context instead of only giving pieces.
- This fixes CSRF not correctly being inserted into the unblock buttons.
(cherry picked from commit 2aa51922ba6a0ea2f8644277baa74fc8f34ab95a)
(cherry picked from commit 7ee8db0f018340bc97f125415503e3e5db5f5082)
(cherry picked from commit e4f8b999bcd3b68b3ef7f54f5b17c3ada0308121)
(cherry picked from commit 05aea60b1302bbd3ea574a9c6c34e1005a5d73bf)
(cherry picked from commit dc0d61b012cfaf2385f71e97cda5f220b58b9fa4)
(cherry picked from commit f53fa583de671ff60a0a1d0f3ab8c260e1ba4e1f)
(cherry picked from commit c65b89a58d11b32009c710c2f5e75f0cd3539395)
(cherry picked from commit 69e50b9969db3ab71cefaed520757876a9629a5c)
(cherry picked from commit ec127440b86cb5fcf51799d8bd76a9fd6b9cebcc)
[MODERATION] cope with shared fixtures
* There is one more issue in the fixtures and this breaks some tests
* The users in the shared fixtures were renamed for clarity and that
breaks some tests
(cherry picked from commit 707a4edbdf67d0eb168d7bb430cf85dd8cd63c52)
Conflicts:
modules/indexer/issues/indexer_test.go
https://codeberg.org/forgejo/forgejo/pulls/1508
(cherry picked from commit 82cc044366c749df80ffad44eed2988b8e64211e)
(cherry picked from commit 2776aec7e85850f1d7f01a090a72491550fb9d29)
(cherry picked from commit 1fbde36dc784b5b2cc6193f02ff0d436b0f2a629)
(cherry picked from commit 1293db3c4e5df218501f5add9f9d41101ffcb8aa)
(cherry picked from commit 6476802175bac3ef78dd8f24ff6bebc16f398a78)
(cherry picked from commit 5740f2fc830356acb7929a02fe304008b94a0ca5)
(cherry picked from commit afc12d7b6e9b773fa89718aa79cd95c0e0ce4406)
[MODERATION] Fix transfer confirmation (squash)
- Fix problem caused by the clearer confirmation for dangerous actions commit.
(cherry picked from commit 3488f4a9cb1f7f73103ae0017d644f13ca3ab798)
(cherry picked from commit ed7de91f6ace23a1459bc6552edf719d62c7c941)
(cherry picked from commit 2d97929b9b7b8d979eb12bf0994d3f169d41f7fd)
(cherry picked from commit 50d035a7b058b9c4486c38cd4be0b02a4e1bf4d9)
(cherry picked from commit 0a0c07d78a1dee3489b97ab359bb957e3f7fb94b)
(cherry picked from commit 85e55c4dbc2f513f3d5254dac20915e8c3c22886)
(cherry picked from commit d8282122ad6e8b497de35d1ed89e3093a2cd5ee2)
(cherry picked from commit 3f0b3b6cc582c3d672d371dd9fe1203a56cb88c0)
[MODERATION] Purge issues on user deletion (squash)
(cherry picked from commit 4f529d9596ffbfc4e754c28830ba028f6344dc5b)
(cherry picked from commit f0e3acadd321fcb99e8ea3e3ce1c69df25c4ca4d)
(cherry picked from commit 682c4effe69dc0d4ed304fa7ce6259d9ce573629)
(cherry picked from commit e43c2d84fd4b6fd31e2370cec1034262d12e5c34)
(cherry picked from commit 9c8e53ccc78053026e4f667889959c23c8d95934)
(cherry picked from commit a9eb7ac783b2c16ee3702a88203bf857cb4147fc)
[MODERATION] Purge issues on user deletion (squash) revert shared fixtures workarounds
(cherry picked from commit 7224653a40e32186892e89bfedd49edecf5b8f81)
(cherry picked from commit aa6e8672f9473a9100e7575051dec9eda37709a0)
(cherry picked from commit 58c7947e95648f50237ddcd46b6bd025b224a70f)
(cherry picked from commit f1aacb1851b232082febcd7870a40a56de3855a6)
(cherry picked from commit 0bf174af87f7de9a8d869304f709e2bf41f3dde9)
(cherry picked from commit f9706f4335df3b7688ed60853d917efa72fb464a)
[MODERATION] Prepare moderation for context locale changes (squash)
- Resolves https://codeberg.org/forgejo/forgejo/issues/1711
(cherry picked from commit 2e289baea943dcece88f02d110b03d344308a261)
(cherry picked from commit 97b16bc19ae680db62608d6020b00fe5ac451c60)
[MODERATION] User blocking (squash) do not use shared fixture
It conflicts with a fixtured added in the commit
Fix comment permissions (#28213) (#28216)
(cherry picked from commit ab40799dcab24e9f495d765268b791931da81684)
(cherry picked from commit 996c92cafdb5b33a6d2d05d94038e950d97eb7de)
(cherry picked from commit 259912e3a69071c5ad57871464d0b79f69a8e72c)
Conflicts:
options/locale/locale_en-US.ini
https://codeberg.org/forgejo/forgejo/pulls/1921
(cherry picked from commit 1e82abc032c18015b92c93a7617a5dd06d50bd2d)
(cherry picked from commit a176fee1607d571b25b345184f1c50d403029610)
(cherry picked from commit 0480b76dfeda968849e900da9454a3efd82590fa)
(cherry picked from commit 4bc06b7b3841c74e3d790b1ef635c2b382ca7123)
(cherry picked from commit 073094cf722a927a623408d66537c758d7d64e4c)
(cherry picked from commit ac6201c647a4d3a2cfb2b0303b851a8fe7a29444)
(cherry picked from commit 7e0812674da3fbd1e96bdda820962edad6826fbd)
(cherry picked from commit 068c741e5696957710b3d1c2e18c00be2ffaa278)
Conflicts:
models/repo_transfer.go
models/repo_transfer_test.go
routers/web/user/profile.go
https://codeberg.org/forgejo/forgejo/pulls/2298
2023-08-15 04:37:38 +05:30
reaction , err := issue_service . CreateIssueReaction ( ctx , ctx . Doer , issue , form . Content )
2017-12-04 04:44:26 +05:30
if err != nil {
2022-03-31 14:50:39 +05:30
if issues_model . IsErrForbiddenIssueReaction ( err ) {
2019-12-08 03:34:19 +05:30
ctx . ServerError ( "ChangeIssueReaction" , err )
return
}
2017-12-04 04:44:26 +05:30
log . Info ( "CreateIssueReaction: %s" , err )
break
}
// Reload new reactions
issue . Reactions = nil
2022-06-13 15:07:59 +05:30
if err = issue . LoadAttributes ( ctx ) ; err != nil {
2017-12-04 04:44:26 +05:30
log . Info ( "issue.LoadAttributes: %s" , err )
break
}
log . Trace ( "Reaction for issue created: %d/%d/%d" , ctx . Repo . Repository . ID , issue . ID , reaction . ID )
case "unreact" :
2023-09-25 18:47:37 +05:30
if err := issues_model . DeleteIssueReaction ( ctx , ctx . Doer . ID , issue . ID , form . Content ) ; err != nil {
2018-01-11 03:04:17 +05:30
ctx . ServerError ( "DeleteIssueReaction" , err )
2017-12-04 04:44:26 +05:30
return
}
// Reload new reactions
issue . Reactions = nil
2022-06-13 15:07:59 +05:30
if err := issue . LoadAttributes ( ctx ) ; err != nil {
2017-12-04 04:44:26 +05:30
log . Info ( "issue.LoadAttributes: %s" , err )
break
}
log . Trace ( "Reaction for issue removed: %d/%d" , ctx . Repo . Repository . ID , issue . ID )
default :
2018-01-11 03:04:17 +05:30
ctx . NotFound ( fmt . Sprintf ( "Unknown action %s" , ctx . Params ( ":action" ) ) , nil )
2017-12-04 04:44:26 +05:30
return
}
if len ( issue . Reactions ) == 0 {
2023-07-05 00:06:08 +05:30
ctx . JSON ( http . StatusOK , map [ string ] any {
2017-12-04 04:44:26 +05:30
"empty" : true ,
"html" : "" ,
} )
return
}
2024-03-02 20:35:07 +05:30
html , err := ctx . RenderToHTML ( tplReactions , map [ string ] any {
2023-03-02 23:14:06 +05:30
"ctxData" : ctx . Data ,
2017-12-04 04:44:26 +05:30
"ActionURL" : fmt . Sprintf ( "%s/issues/%d/reactions" , ctx . Repo . RepoLink , issue . Index ) ,
"Reactions" : issue . Reactions . GroupByType ( ) ,
} )
if err != nil {
2018-01-11 03:04:17 +05:30
ctx . ServerError ( "ChangeIssueReaction.HTMLString" , err )
2017-12-04 04:44:26 +05:30
return
}
2023-07-05 00:06:08 +05:30
ctx . JSON ( http . StatusOK , map [ string ] any {
2017-12-04 04:44:26 +05:30
"html" : html ,
} )
}
// ChangeCommentReaction create a reaction for comment
2021-01-26 21:06:53 +05:30
func ChangeCommentReaction ( ctx * context . Context ) {
2021-04-07 01:14:05 +05:30
form := web . GetForm ( ctx ) . ( * forms . ReactionForm )
2022-06-13 15:07:59 +05:30
comment , err := issues_model . GetCommentByID ( ctx , ctx . ParamsInt64 ( ":id" ) )
2017-12-04 04:44:26 +05:30
if err != nil {
2022-06-13 15:07:59 +05:30
ctx . NotFoundOrServerError ( "GetCommentByID" , issues_model . IsErrCommentNotExist , err )
2017-12-04 04:44:26 +05:30
return
}
2022-11-19 13:42:33 +05:30
if err := comment . LoadIssue ( ctx ) ; err != nil {
2022-06-13 15:07:59 +05:30
ctx . NotFoundOrServerError ( "LoadIssue" , issues_model . IsErrIssueNotExist , err )
2017-12-04 04:44:26 +05:30
return
}
2023-11-25 22:51:21 +05:30
if comment . Issue . RepoID != ctx . Repo . Repository . ID {
ctx . NotFound ( "CompareRepoID" , issues_model . ErrCommentNotExist { } )
return
}
2022-03-22 12:33:22 +05:30
if ! ctx . IsSigned || ( ctx . Doer . ID != comment . PosterID && ! ctx . Repo . CanReadIssuesOrPulls ( comment . Issue . IsPull ) ) {
2019-04-23 02:10:51 +05:30
if log . IsTrace ( ) {
if ctx . IsSigned {
issueType := "issues"
if comment . Issue . IsPull {
issueType = "pulls"
}
log . Trace ( "Permission Denied: User %-v not the Poster (ID: %d) and cannot read %s in Repo %-v.\n" +
"User in Repo has Permissions: %-+v" ,
2022-03-22 12:33:22 +05:30
ctx . Doer ,
Rewrite logger system (#24726)
## ⚠️ Breaking
The `log.<mode>.<logger>` style config has been dropped. If you used it,
please check the new config manual & app.example.ini to make your
instance output logs as expected.
Although many legacy options still work, it's encouraged to upgrade to
the new options.
The SMTP logger is deleted because SMTP is not suitable to collect logs.
If you have manually configured Gitea log options, please confirm the
logger system works as expected after upgrading.
## Description
Close #12082 and maybe more log-related issues, resolve some related
FIXMEs in old code (which seems unfixable before)
Just like rewriting queue #24505 : make code maintainable, clear legacy
bugs, and add the ability to support more writers (eg: JSON, structured
log)
There is a new document (with examples): `logging-config.en-us.md`
This PR is safer than the queue rewriting, because it's just for
logging, it won't break other logic.
## The old problems
The logging system is quite old and difficult to maintain:
* Unclear concepts: Logger, NamedLogger, MultiChannelledLogger,
SubLogger, EventLogger, WriterLogger etc
* Some code is difficult to know whether it is right:
`log.DelNamedLogger("console")` vs `log.DelNamedLogger(log.DEFAULT)` vs
`log.DelLogger("console")`
* The old system heavily depends on ini config system, it's difficult to
create new logger for different purpose, and it's very fragile.
* The "color" trick is difficult to use and read, many colors are
unnecessary, and in the future structured log could help
* It's difficult to add other log formats, eg: JSON format
* The log outputer doesn't have full control of its goroutine, it's
difficult to make outputer have advanced behaviors
* The logs could be lost in some cases: eg: no Fatal error when using
CLI.
* Config options are passed by JSON, which is quite fragile.
* INI package makes the KEY in `[log]` section visible in `[log.sub1]`
and `[log.sub1.subA]`, this behavior is quite fragile and would cause
more unclear problems, and there is no strong requirement to support
`log.<mode>.<logger>` syntax.
## The new design
See `logger.go` for documents.
## Screenshot
<details>
![image](https://github.com/go-gitea/gitea/assets/2114189/4462d713-ba39-41f5-bb08-de912e67e1ff)
![image](https://github.com/go-gitea/gitea/assets/2114189/b188035e-f691-428b-8b2d-ff7b2199b2f9)
![image](https://github.com/go-gitea/gitea/assets/2114189/132e9745-1c3b-4e00-9e0d-15eaea495dee)
</details>
## TODO
* [x] add some new tests
* [x] fix some tests
* [x] test some sub-commands (manually ....)
---------
Co-authored-by: Jason Song <i@wolfogre.com>
Co-authored-by: delvh <dev.lh@web.de>
Co-authored-by: Giteabot <teabot@gitea.io>
2023-05-22 04:05:11 +05:30
comment . Issue . PosterID ,
2019-04-23 02:10:51 +05:30
issueType ,
ctx . Repo . Repository ,
ctx . Repo . Permission )
} else {
log . Trace ( "Permission Denied: Not logged in" )
}
}
2021-04-05 21:00:52 +05:30
ctx . Error ( http . StatusForbidden )
2018-11-28 16:56:14 +05:30
return
2022-01-18 22:58:38 +05:30
}
2023-04-20 12:09:44 +05:30
if ! comment . Type . HasContentSupport ( ) {
2021-04-05 21:00:52 +05:30
ctx . Error ( http . StatusNoContent )
2017-12-04 04:44:26 +05:30
return
}
switch ctx . Params ( ":action" ) {
case "react" :
[MODERATION] User blocking
- Add the ability to block a user via their profile page.
- This will unstar their repositories and vice versa.
- Blocked users cannot create issues or pull requests on the doer's repositories (mind that this is not the case for organizations).
- Blocked users cannot comment on the doer's opened issues or pull requests.
- Blocked users cannot add reactions to doer's comments.
- Blocked users cannot cause a notification through mentioning the doer.
Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/540
(cherry picked from commit 687d852480388897db4d7b0cb397cf7135ab97b1)
(cherry picked from commit 0c32a4fde531018f74e01d9db6520895fcfa10cc)
(cherry picked from commit 1791130e3cb8470b9b39742e0004d5e4c7d1e64d)
(cherry picked from commit 37858b7e8fb6ba6c6ea0ac2562285b3b144efa19)
(cherry picked from commit a3e2bfd7e9eab82cc2c17061f6bb4e386a108c46)
(cherry picked from commit 7009b9fe87696b6182fab65ae82bf5a25cd39971)
Conflicts: https://codeberg.org/forgejo/forgejo/pulls/1014
routers/web/user/profile.go
templates/user/profile.tmpl
(cherry picked from commit b2aec3479177e725cfc7cbbb9d94753226928d1c)
(cherry picked from commit e2f1b73752f6bd3f830297d8f4ac438837471226)
[MODERATION] organization blocking a user (#802)
- Resolves #476
- Follow up for: #540
- Ensure that the doer and blocked person cannot follow each other.
- Ensure that the blocked person cannot watch doer's repositories.
- Add unblock button to the blocked user list.
- Add blocked since information to the blocked user list.
- Add extra testing to moderation code.
- Blocked user will unwatch doer's owned repository upon blocking.
- Add flash messages to let the user know the block/unblock action was successful.
- Add "You haven't blocked any users" message.
- Add organization blocking a user.
Co-authored-by: Gusted <postmaster@gusted.xyz>
Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/802
(cherry picked from commit 0505a1042197bd9136b58bc70ec7400a23471585)
(cherry picked from commit 37b4e6ef9b85e97d651cf350c9f3ea272ee8d76a)
(cherry picked from commit c17c121f2cf1f00e2a8d6fd6847705df47d0771e)
[MODERATION] organization blocking a user (#802) (squash)
Changes to adapt to:
6bbccdd177 Improve AJAX link and modal confirm dialog (#25210)
Refs: https://codeberg.org/forgejo/forgejo/pulls/882/files#issuecomment-945962
Refs: https://codeberg.org/forgejo/forgejo/pulls/882#issue-330561
(cherry picked from commit 523635f83cb2a1a4386769b79326088c5c4bbec7)
(cherry picked from commit 4743eaa6a0be0ef47de5b17c211dfe8bad1b7af9)
(cherry picked from commit eff5b43d2e843d5d537756d4fa58a8a010b6b527)
Conflicts: https://codeberg.org/forgejo/forgejo/pulls/1014
routers/web/user/profile.go
(cherry picked from commit 9d359be5ed11237088ccf6328571939af814984e)
(cherry picked from commit b1f3069a22a03734cffbfcd503ce004ba47561b7)
[MODERATION] add user blocking API
- Follow up for: #540, #802
- Add API routes for user blocking from user and organization
perspective.
- The new routes have integration testing.
- The new model functions have unit tests.
- Actually quite boring to write and to read this pull request.
(cherry picked from commit f3afaf15c7e34038363c9ce8e1ef957ec1e22b06)
(cherry picked from commit 6d754db3e5faff93a58fab2867737f81f40f6599)
(cherry picked from commit 2a89ddc0acffa9aea0f02b721934ef9e2b496a88)
(cherry picked from commit 4a147bff7e963ab9dffcfaefa5c2c01c59b4c732)
Conflicts:
routers/api/v1/api.go
templates/swagger/v1_json.tmpl
(cherry picked from commit bb8c33918569f65f25b014f0d7fe6ac20f9036fc)
(cherry picked from commit 5a11569a011b7d0a14391e2b5c07d0af825d7b0e)
(cherry picked from commit 2373c801ee6b84c368b498b16e6ad18650b38f42)
[MODERATION] restore redirect on unblock
ctx.RedirectToFirst(ctx.FormString("redirect_to"), ctx.ContextUser.HomeLink())
was replaced by
ctx.JSONOK()
in 128d77a3a Following up fixes for "Fix inconsistent user profile layout across tabs" (#25739)
thus changing the behavior (nicely spotted by the tests). This
restores it.
(cherry picked from commit 597c243707c3c86e7256faf1e6ba727224554de3)
(cherry picked from commit cfa539e590127b4b953b010fba3dea21c82a1714)
[MODERATION] Add test case (squash)
- Add a test case, to test a property of the function.
(cherry picked from commit 70dadb1916bfef8ba8cbc4e9b042cc8740f45e28)
[MODERATION] Block adding collaborators
- Ensure that the doer and blocked user cannot add each other as
collaborators to repositories.
- The Web UI gets a detailed message of the specific situation, the API
gets a generic Forbidden code.
- Unit tests has been added.
- Integration testing for Web and API has been added.
- This commit doesn't introduce removing each other as collaborators on
the block action, due to the complexity of database calls that needs to
be figured out. That deserves its own commit and test code.
(cherry picked from commit 747be949a1b3cd06f6586512f1af4630e55d7ad4)
[MODERATION] move locale_en-US.ini strings to avoid conflicts
Conflicts:
web_src/css/org.css
web_src/css/user.css
https://codeberg.org/forgejo/forgejo/pulls/1180
(cherry picked from commit e53f955c888ebaafc863a6e463da87f70f5605da)
Conflicts:
services/issue/comments.go
https://codeberg.org/forgejo/forgejo/pulls/1212
(cherry picked from commit b4a454b576eee0c7738b2f7df1acaf5bf7810d12)
Conflicts:
models/forgejo_migrations/migrate.go
options/locale/locale_en-US.ini
services/pull/pull.go
https://codeberg.org/forgejo/forgejo/pulls/1264
[MODERATION] Remove blocked user collaborations with doer
- When the doer blocks a user, who is also a collaborator on a
repository that the doer owns, remove that collaboration.
- Added unit tests.
- Refactor the unit test to be more organized.
(cherry picked from commit ec8701617830152680d69d50d64cb43cc2054a89)
(cherry picked from commit 313e6174d832501c57724ae7a6285194b7b81aab)
[MODERATION] QoL improvements (squash)
- Ensure that organisations cannot be blocked. It currently has no
effect, as all blocked operations cannot be executed from an
organisation standpoint.
- Refactored the API route to make use of the `UserAssignmentAPI`
middleware.
- Make more use of `t.Run` so that the test code is more clear about
which block of code belongs to which test case.
- Added more integration testing (to ensure the organisations cannot be
blocked and some authorization/permission checks).
(cherry picked from commit e9d638d0756ee20b6bf1eb999c988533a5066a68)
[MODERATION] s/{{avatar/{{ctx.AvatarUtils.Avatar/
(cherry picked from commit ce8b30be1327ab98df2ba061dd7e2a278b278c5b)
(cherry picked from commit f911dc402508b04cd5d5fb2f3332c2d640e4556e)
Conflicts:
options/locale/locale_en-US.ini
https://codeberg.org/forgejo/forgejo/pulls/1354
(cherry picked from commit c1b37b7fdaf06ee60da341dff76d703990c08082)
(cherry picked from commit 856a2e09036adf56d987c6eee364c431bc37fb2e)
[MODERATION] Show graceful error on comment creation
- When someone is blocked by the repository owner or issue poster and
try to comment on that issue, they get shown a graceful error.
- Adds integration test.
(cherry picked from commit 490646302e1e3dc3c59c9d75938b4647b6873ce7)
(cherry picked from commit d3d88667cbb928a6ff80658eba8ef0c6c508c9e0)
(cherry picked from commit 6818de13a921753e082b7c3d64c23917cc884e4b)
[MODERATION] Show graceful error on comment creation (squash) typo
(cherry picked from commit 1588d4834a37a744f092f2aeea6c9ef4795d7356)
(cherry picked from commit d510ea52d091503e841d66f2f604348add8b4535)
(cherry picked from commit 8249e93a14f628bb0e89fe3be678e4966539944e)
[MODERATION] Refactor integration testing (squash)
- Motivation for this PR is that I'd noticed that a lot of repeated
calls are happening between the test functions and that certain tests
weren't using helper functions like `GetCSRF`, therefore this refactor of
the integration tests to keep it: clean, small and hopefully more
maintainable and understandable.
- There are now three integration tests: `TestBlockUser`,
`TestBlockUserFromOrganization` and `TestBlockActions` (and has been
moved in that order in the source code).
- `TestBlockUser` is for doing blocking related actions as a user and
`TestBlockUserFromOrganization` as an organisation, even though they
execute the same kind of tests they do not share any database calls or
logic and therefore it currently doesn't make sense to merge them
together (hopefully such opportunity might be presented in the future).
- `TestBlockActions` now contains all tests for actions that should be
blocked after blocking has happened, most tests now share the same doer
and blocked users and an extra fixture has been added to make this
possible for the comment test.
- Less code, more comments and more re-use between tests.
(cherry picked from commit ffb393213d2f1269aad3c019d039cf60d0fe4b10)
(cherry picked from commit 85505e0f815fede589c272d301c95204f9596985)
(cherry picked from commit 0f3cf17761f6caedb17550f69de96990c2090af1)
[MODERATION] Fix network error (squash)
- Fix network error toast messages on user actions such as follow and
unfollow. This happened because the javascript code now expects JSON
to be returned, but this wasn't the case due to
cfa539e590127b4b953b010fba3dea21c82a1714.
- The integration testing has been adjusted to instead test for the
returned flash cookie.
(cherry picked from commit 112bc25e548d317a4ee00f9efa9068794a733e3b)
(cherry picked from commit 1194fe4899eb39dcb9a2410032ad0cc67a62b92b)
(cherry picked from commit 9abb95a8441e227874fe156095349a3173cc5a81)
[MODERATION] Modernize frontend (squash)
- Unify blocked users list.
- Use the new flex list classes for blocked users list to avoid using
the CSS helper classes and thereby be consistent in the design.
- Fix the modal by using the new modal class.
- Remove the icon in the modal as looks too big in the new design.
- Fix avatar not displaying as it was passing the context where the user
should've been passed.
- Don't use italics for 'Blocked since' text.
- Use namelink template to display the user's name and homelink.
(cherry picked from commit ec935a16a319b14e819ead828d1d9875280d9259)
(cherry picked from commit 67f37c83461aa393c53a799918e9708cb9b89b30)
Conflicts:
models/user/follow.go
models/user/user_test.go
routers/api/v1/user/follower.go
routers/web/shared/user/header.go
routers/web/user/profile.go
templates/swagger/v1_json.tmpl
https://codeberg.org/forgejo/forgejo/pulls/1468
(cherry picked from commit 6a9626839c6342cd2767ea12757ee2f78eaf443b)
Conflicts:
tests/integration/api_nodeinfo_test.go
https://codeberg.org/forgejo/forgejo/pulls/1508#issuecomment-1242385
(cherry picked from commit 7378b251b481ed1e60e816caf8f649e8397ee5fc)
Conflicts:
models/fixtures/watch.yml
models/issues/reaction.go
models/issues/reaction_test.go
routers/api/v1/repo/issue_reaction.go
routers/web/repo/issue.go
services/issue/issue.go
https://codeberg.org/forgejo/forgejo/pulls/1547
(cherry picked from commit c2028930c101223820de0bbafc318e9394c347b8)
(cherry picked from commit d3f9134aeeef784586e8412e8dbba0a8fceb0cd4)
(cherry picked from commit 7afe154c5c40bcc65accdf51c9224b2f7627a684)
(cherry picked from commit 99ac7353eb1e834a77fe42aa89208791cc2364ff)
(cherry picked from commit a9cde00c5c25ea8c427967cb7ab57abb618e44cb)
Conflicts:
services/user/delete.go
https://codeberg.org/forgejo/forgejo/pulls/1736
(cherry picked from commit 008c0cc63d1a3b8eb694bffbf77a7b25c56afd57)
[DEADCODE] add exceptions
(cherry picked from commit 12ddd2b10e3309f6430b0af42855c6af832832ee)
[MODERATION] Remove deadcode (squash)
- Remove deadcode that's no longer used by Forgejo.
(cherry picked from commit 0faeab4fa9b0aa59f86760b24ecbc07815026c82)
[MODERATION] Add repo transfers to blocked functionality (squash)
- When someone gets blocked, remove all pending repository transfers
from the blocked user to the doer.
- Do not allow to start transferring repositories to the doer as blocked user.
- Added unit testing.
- Added integration testing.
(cherry picked from commit 8a3caac33013482ddbee2fa51510c6918ba54466)
(cherry picked from commit a92b4cfeb63b90eb2d90d0feb51cec62e0502d84)
(cherry picked from commit acaaaf07d999974dbe5f9c5e792621c597bfb542)
(cherry picked from commit 735818863c1793aa6f6983afedc4bd3b36026ca5)
(cherry picked from commit f50fa43b32160d0d88eca1dbdca09b5f575fb62b)
(cherry picked from commit e16683643388fb3c60ea478f1419a6af4f4aa283)
(cherry picked from commit 82a0e4a3814a66ce44be6a031bdf08484586c61b)
(cherry picked from commit ff233c19c4a5edcc2b99a6f41a2d19dbe8c08b3b)
(cherry picked from commit 8ad87d215f2b6adb978de77e53ba2bf7ea571430)
[MODERATION] Fix unblock action (squash)
- Pass the whole context instead of only giving pieces.
- This fixes CSRF not correctly being inserted into the unblock buttons.
(cherry picked from commit 2aa51922ba6a0ea2f8644277baa74fc8f34ab95a)
(cherry picked from commit 7ee8db0f018340bc97f125415503e3e5db5f5082)
(cherry picked from commit e4f8b999bcd3b68b3ef7f54f5b17c3ada0308121)
(cherry picked from commit 05aea60b1302bbd3ea574a9c6c34e1005a5d73bf)
(cherry picked from commit dc0d61b012cfaf2385f71e97cda5f220b58b9fa4)
(cherry picked from commit f53fa583de671ff60a0a1d0f3ab8c260e1ba4e1f)
(cherry picked from commit c65b89a58d11b32009c710c2f5e75f0cd3539395)
(cherry picked from commit 69e50b9969db3ab71cefaed520757876a9629a5c)
(cherry picked from commit ec127440b86cb5fcf51799d8bd76a9fd6b9cebcc)
[MODERATION] cope with shared fixtures
* There is one more issue in the fixtures and this breaks some tests
* The users in the shared fixtures were renamed for clarity and that
breaks some tests
(cherry picked from commit 707a4edbdf67d0eb168d7bb430cf85dd8cd63c52)
Conflicts:
modules/indexer/issues/indexer_test.go
https://codeberg.org/forgejo/forgejo/pulls/1508
(cherry picked from commit 82cc044366c749df80ffad44eed2988b8e64211e)
(cherry picked from commit 2776aec7e85850f1d7f01a090a72491550fb9d29)
(cherry picked from commit 1fbde36dc784b5b2cc6193f02ff0d436b0f2a629)
(cherry picked from commit 1293db3c4e5df218501f5add9f9d41101ffcb8aa)
(cherry picked from commit 6476802175bac3ef78dd8f24ff6bebc16f398a78)
(cherry picked from commit 5740f2fc830356acb7929a02fe304008b94a0ca5)
(cherry picked from commit afc12d7b6e9b773fa89718aa79cd95c0e0ce4406)
[MODERATION] Fix transfer confirmation (squash)
- Fix problem caused by the clearer confirmation for dangerous actions commit.
(cherry picked from commit 3488f4a9cb1f7f73103ae0017d644f13ca3ab798)
(cherry picked from commit ed7de91f6ace23a1459bc6552edf719d62c7c941)
(cherry picked from commit 2d97929b9b7b8d979eb12bf0994d3f169d41f7fd)
(cherry picked from commit 50d035a7b058b9c4486c38cd4be0b02a4e1bf4d9)
(cherry picked from commit 0a0c07d78a1dee3489b97ab359bb957e3f7fb94b)
(cherry picked from commit 85e55c4dbc2f513f3d5254dac20915e8c3c22886)
(cherry picked from commit d8282122ad6e8b497de35d1ed89e3093a2cd5ee2)
(cherry picked from commit 3f0b3b6cc582c3d672d371dd9fe1203a56cb88c0)
[MODERATION] Purge issues on user deletion (squash)
(cherry picked from commit 4f529d9596ffbfc4e754c28830ba028f6344dc5b)
(cherry picked from commit f0e3acadd321fcb99e8ea3e3ce1c69df25c4ca4d)
(cherry picked from commit 682c4effe69dc0d4ed304fa7ce6259d9ce573629)
(cherry picked from commit e43c2d84fd4b6fd31e2370cec1034262d12e5c34)
(cherry picked from commit 9c8e53ccc78053026e4f667889959c23c8d95934)
(cherry picked from commit a9eb7ac783b2c16ee3702a88203bf857cb4147fc)
[MODERATION] Purge issues on user deletion (squash) revert shared fixtures workarounds
(cherry picked from commit 7224653a40e32186892e89bfedd49edecf5b8f81)
(cherry picked from commit aa6e8672f9473a9100e7575051dec9eda37709a0)
(cherry picked from commit 58c7947e95648f50237ddcd46b6bd025b224a70f)
(cherry picked from commit f1aacb1851b232082febcd7870a40a56de3855a6)
(cherry picked from commit 0bf174af87f7de9a8d869304f709e2bf41f3dde9)
(cherry picked from commit f9706f4335df3b7688ed60853d917efa72fb464a)
[MODERATION] Prepare moderation for context locale changes (squash)
- Resolves https://codeberg.org/forgejo/forgejo/issues/1711
(cherry picked from commit 2e289baea943dcece88f02d110b03d344308a261)
(cherry picked from commit 97b16bc19ae680db62608d6020b00fe5ac451c60)
[MODERATION] User blocking (squash) do not use shared fixture
It conflicts with a fixture added in the commit
Fix comment permissions (#28213) (#28216)
(cherry picked from commit ab40799dcab24e9f495d765268b791931da81684)
(cherry picked from commit 996c92cafdb5b33a6d2d05d94038e950d97eb7de)
(cherry picked from commit 259912e3a69071c5ad57871464d0b79f69a8e72c)
Conflicts:
options/locale/locale_en-US.ini
https://codeberg.org/forgejo/forgejo/pulls/1921
(cherry picked from commit 1e82abc032c18015b92c93a7617a5dd06d50bd2d)
(cherry picked from commit a176fee1607d571b25b345184f1c50d403029610)
(cherry picked from commit 0480b76dfeda968849e900da9454a3efd82590fa)
(cherry picked from commit 4bc06b7b3841c74e3d790b1ef635c2b382ca7123)
(cherry picked from commit 073094cf722a927a623408d66537c758d7d64e4c)
(cherry picked from commit ac6201c647a4d3a2cfb2b0303b851a8fe7a29444)
(cherry picked from commit 7e0812674da3fbd1e96bdda820962edad6826fbd)
(cherry picked from commit 068c741e5696957710b3d1c2e18c00be2ffaa278)
Conflicts:
models/repo_transfer.go
models/repo_transfer_test.go
routers/web/user/profile.go
https://codeberg.org/forgejo/forgejo/pulls/2298
2023-08-15 04:37:38 +05:30
reaction , err := issue_service . CreateCommentReaction ( ctx , ctx . Doer , comment . Issue , comment , form . Content )
2017-12-04 04:44:26 +05:30
if err != nil {
2022-03-31 14:50:39 +05:30
if issues_model . IsErrForbiddenIssueReaction ( err ) {
2019-12-08 03:34:19 +05:30
ctx . ServerError ( "ChangeIssueReaction" , err )
return
}
2017-12-04 04:44:26 +05:30
log . Info ( "CreateCommentReaction: %s" , err )
break
}
// Reload new reactions
comment . Reactions = nil
2023-09-29 17:42:54 +05:30
if err = comment . LoadReactions ( ctx , ctx . Repo . Repository ) ; err != nil {
2017-12-04 04:44:26 +05:30
log . Info ( "comment.LoadReactions: %s" , err )
break
}
2018-11-28 16:56:14 +05:30
log . Trace ( "Reaction for comment created: %d/%d/%d/%d" , ctx . Repo . Repository . ID , comment . Issue . ID , comment . ID , reaction . ID )
2017-12-04 04:44:26 +05:30
case "unreact" :
2023-09-25 18:47:37 +05:30
if err := issues_model . DeleteCommentReaction ( ctx , ctx . Doer . ID , comment . Issue . ID , comment . ID , form . Content ) ; err != nil {
2018-01-11 03:04:17 +05:30
ctx . ServerError ( "DeleteCommentReaction" , err )
2017-12-04 04:44:26 +05:30
return
}
// Reload new reactions
comment . Reactions = nil
2023-09-29 17:42:54 +05:30
if err = comment . LoadReactions ( ctx , ctx . Repo . Repository ) ; err != nil {
2017-12-04 04:44:26 +05:30
log . Info ( "comment.LoadReactions: %s" , err )
break
}
2018-11-28 16:56:14 +05:30
log . Trace ( "Reaction for comment removed: %d/%d/%d" , ctx . Repo . Repository . ID , comment . Issue . ID , comment . ID )
2017-12-04 04:44:26 +05:30
default :
2018-01-11 03:04:17 +05:30
ctx . NotFound ( fmt . Sprintf ( "Unknown action %s" , ctx . Params ( ":action" ) ) , nil )
2017-12-04 04:44:26 +05:30
return
}
if len ( comment . Reactions ) == 0 {
2023-07-05 00:06:08 +05:30
ctx . JSON ( http . StatusOK , map [ string ] any {
2017-12-04 04:44:26 +05:30
"empty" : true ,
"html" : "" ,
} )
return
}
2024-03-02 20:35:07 +05:30
html , err := ctx . RenderToHTML ( tplReactions , map [ string ] any {
2023-03-02 23:14:06 +05:30
"ctxData" : ctx . Data ,
2017-12-04 04:44:26 +05:30
"ActionURL" : fmt . Sprintf ( "%s/comments/%d/reactions" , ctx . Repo . RepoLink , comment . ID ) ,
"Reactions" : comment . Reactions . GroupByType ( ) ,
} )
if err != nil {
2018-01-11 03:04:17 +05:30
ctx . ServerError ( "ChangeCommentReaction.HTMLString" , err )
2017-12-04 04:44:26 +05:30
return
}
2023-07-05 00:06:08 +05:30
ctx . JSON ( http . StatusOK , map [ string ] any {
2017-12-04 04:44:26 +05:30
"html" : html ,
} )
}
2019-09-07 20:23:35 +05:30
2021-11-24 15:19:20 +05:30
func addParticipant ( poster * user_model . User , participants [ ] * user_model . User ) [ ] * user_model . User {
2019-09-07 20:23:35 +05:30
for _ , part := range participants {
if poster . ID == part . ID {
return participants
}
}
return append ( participants , poster )
}
2019-09-20 11:15:38 +05:30
2022-06-13 15:07:59 +05:30
func filterXRefComments ( ctx * context . Context , issue * issues_model . Issue ) error {
2019-09-20 11:15:38 +05:30
// Remove comments that the user has no permissions to see
for i := 0 ; i < len ( issue . Comments ) ; {
c := issue . Comments [ i ]
2022-06-13 15:07:59 +05:30
if issues_model . CommentTypeIsRef ( c . Type ) && c . RefRepoID != issue . RepoID && c . RefRepoID != 0 {
2019-09-20 11:15:38 +05:30
var err error
// Set RefRepo for description in template
2022-12-03 08:18:26 +05:30
c . RefRepo , err = repo_model . GetRepositoryByID ( ctx , c . RefRepoID )
2019-09-20 11:15:38 +05:30
if err != nil {
return err
}
2022-05-11 15:39:36 +05:30
perm , err := access_model . GetUserRepoPermission ( ctx , c . RefRepo , ctx . Doer )
2019-09-20 11:15:38 +05:30
if err != nil {
return err
}
if ! perm . CanReadIssuesOrPulls ( c . RefIsPull ) {
issue . Comments = append ( issue . Comments [ : i ] , issue . Comments [ i + 1 : ] ... )
continue
}
}
i ++
}
return nil
}
2019-10-15 17:49:32 +05:30
// GetIssueAttachments returns attachments for the issue
func GetIssueAttachments ( ctx * context . Context ) {
issue := GetActionIssue ( ctx )
2023-07-06 00:22:12 +05:30
if ctx . Written ( ) {
return
}
2022-01-20 23:16:10 +05:30
attachments := make ( [ ] * api . Attachment , len ( issue . Attachments ) )
2019-10-15 17:49:32 +05:30
for i := 0 ; i < len ( issue . Attachments ) ; i ++ {
2023-07-10 15:01:19 +05:30
attachments [ i ] = convert . ToAttachment ( ctx . Repo . Repository , issue . Attachments [ i ] )
2019-10-15 17:49:32 +05:30
}
2021-04-05 21:00:52 +05:30
ctx . JSON ( http . StatusOK , attachments )
2019-10-15 17:49:32 +05:30
}
// GetCommentAttachments returns attachments for the comment
func GetCommentAttachments ( ctx * context . Context ) {
2022-06-13 15:07:59 +05:30
comment , err := issues_model . GetCommentByID ( ctx , ctx . ParamsInt64 ( ":id" ) )
2019-10-15 17:49:32 +05:30
if err != nil {
2022-06-13 15:07:59 +05:30
ctx . NotFoundOrServerError ( "GetCommentByID" , issues_model . IsErrCommentNotExist , err )
2019-10-15 17:49:32 +05:30
return
}
2023-04-20 12:09:44 +05:30
2023-11-25 22:51:21 +05:30
if err := comment . LoadIssue ( ctx ) ; err != nil {
ctx . NotFoundOrServerError ( "LoadIssue" , issues_model . IsErrIssueNotExist , err )
return
}
if comment . Issue . RepoID != ctx . Repo . Repository . ID {
ctx . NotFound ( "CompareRepoID" , issues_model . ErrCommentNotExist { } )
return
}
if ! ctx . Repo . Permission . CanReadIssuesOrPulls ( comment . Issue . IsPull ) {
ctx . NotFound ( "CanReadIssuesOrPulls" , issues_model . ErrCommentNotExist { } )
return
}
2023-04-20 12:09:44 +05:30
if ! comment . Type . HasAttachmentSupport ( ) {
ctx . ServerError ( "GetCommentAttachments" , fmt . Errorf ( "comment type %v does not support attachments" , comment . Type ) )
return
}
2022-01-20 23:16:10 +05:30
attachments := make ( [ ] * api . Attachment , 0 )
2023-04-20 12:09:44 +05:30
if err := comment . LoadAttachments ( ctx ) ; err != nil {
ctx . ServerError ( "LoadAttachments" , err )
return
}
for i := 0 ; i < len ( comment . Attachments ) ; i ++ {
2023-07-10 15:01:19 +05:30
attachments = append ( attachments , convert . ToAttachment ( ctx . Repo . Repository , comment . Attachments [ i ] ) )
2019-10-15 17:49:32 +05:30
}
2021-04-05 21:00:52 +05:30
ctx . JSON ( http . StatusOK , attachments )
2019-10-15 17:49:32 +05:30
}
2023-07-05 00:06:08 +05:30
func updateAttachments ( ctx * context . Context , item any , files [ ] string ) error {
2021-11-19 19:09:57 +05:30
var attachments [ ] * repo_model . Attachment
2019-10-15 17:49:32 +05:30
switch content := item . ( type ) {
2022-06-13 15:07:59 +05:30
case * issues_model . Issue :
2019-10-15 17:49:32 +05:30
attachments = content . Attachments
2022-06-13 15:07:59 +05:30
case * issues_model . Comment :
2019-10-15 17:49:32 +05:30
attachments = content . Attachments
default :
2022-02-26 17:45:32 +05:30
return fmt . Errorf ( "unknown Type: %T" , content )
2019-10-15 17:49:32 +05:30
}
for i := 0 ; i < len ( attachments ) ; i ++ {
Improve utils of slices (#22379)
- Move the file `compare.go` and `slice.go` to `slice.go`.
- Fix `ExistsInSlice`, it's buggy
- It uses `sort.Search`, so it assumes that the input slice is sorted.
- It passes `func(i int) bool { return slice[i] == target })` to
`sort.Search`, that's incorrect, check the doc of `sort.Search`.
- Conbine `IsInt64InSlice(int64, []int64)` and `ExistsInSlice(string,
[]string)` to `SliceContains[T]([]T, T)`.
- Conbine `IsSliceInt64Eq([]int64, []int64)` and `IsEqualSlice([]string,
[]string)` to `SliceSortedEqual[T]([]T, T)`.
- Add `SliceEqual[T]([]T, T)` as a distinction from
`SliceSortedEqual[T]([]T, T)`.
- Redesign `RemoveIDFromList([]int64, int64) ([]int64, bool)` to
`SliceRemoveAll[T]([]T, T) []T`.
- Add `SliceContainsFunc[T]([]T, func(T) bool)` and
`SliceRemoveAllFunc[T]([]T, func(T) bool)` for general use.
- Add comments to explain why not `golang.org/x/exp/slices`.
- Add unit tests.
2023-01-11 11:01:16 +05:30
if util . SliceContainsString ( files , attachments [ i ] . UUID ) {
2019-10-15 17:49:32 +05:30
continue
}
2023-09-15 11:43:19 +05:30
if err := repo_model . DeleteAttachment ( ctx , attachments [ i ] , true ) ; err != nil {
2019-10-15 17:49:32 +05:30
return err
}
}
var err error
if len ( files ) > 0 {
switch content := item . ( type ) {
2022-06-13 15:07:59 +05:30
case * issues_model . Issue :
2023-09-29 17:42:54 +05:30
err = issues_model . UpdateIssueAttachments ( ctx , content . ID , files )
2022-06-13 15:07:59 +05:30
case * issues_model . Comment :
2023-09-29 17:42:54 +05:30
err = content . UpdateAttachments ( ctx , files )
2019-10-15 17:49:32 +05:30
default :
2022-02-26 17:45:32 +05:30
return fmt . Errorf ( "unknown Type: %T" , content )
2019-10-15 17:49:32 +05:30
}
if err != nil {
return err
}
}
switch content := item . ( type ) {
2022-06-13 15:07:59 +05:30
case * issues_model . Issue :
2022-05-20 19:38:52 +05:30
content . Attachments , err = repo_model . GetAttachmentsByIssueID ( ctx , content . ID )
2022-06-13 15:07:59 +05:30
case * issues_model . Comment :
2022-05-20 19:38:52 +05:30
content . Attachments , err = repo_model . GetAttachmentsByCommentID ( ctx , content . ID )
2019-10-15 17:49:32 +05:30
default :
2022-02-26 17:45:32 +05:30
return fmt . Errorf ( "unknown Type: %T" , content )
2019-10-15 17:49:32 +05:30
}
return err
}
2024-03-02 20:35:07 +05:30
func attachmentsHTML ( ctx * context . Context , attachments [ ] * repo_model . Attachment , content string ) template . HTML {
attachHTML , err := ctx . RenderToHTML ( tplAttachment , map [ string ] any {
2023-03-02 23:14:06 +05:30
"ctxData" : ctx . Data ,
2019-10-15 17:49:32 +05:30
"Attachments" : attachments ,
2020-12-14 00:42:27 +05:30
"Content" : content ,
2019-10-15 17:49:32 +05:30
} )
if err != nil {
ctx . ServerError ( "attachmentsHTML.HTMLString" , err )
return ""
}
return attachHTML
}
2020-10-26 03:19:48 +05:30
2021-03-05 20:47:32 +05:30
// combineLabelComments combine the nearby label comments as one.
2022-06-13 15:07:59 +05:30
func combineLabelComments ( issue * issues_model . Issue ) {
var prev , cur * issues_model . Comment
2020-11-21 03:59:09 +05:30
for i := 0 ; i < len ( issue . Comments ) ; i ++ {
2021-03-05 20:47:32 +05:30
cur = issue . Comments [ i ]
2020-11-21 03:59:09 +05:30
if i > 0 {
2020-10-26 03:19:48 +05:30
prev = issue . Comments [ i - 1 ]
}
2022-06-13 15:07:59 +05:30
if i == 0 || cur . Type != issues_model . CommentTypeLabel ||
2020-11-21 03:59:09 +05:30
( prev != nil && prev . PosterID != cur . PosterID ) ||
( prev != nil && cur . CreatedUnix - prev . CreatedUnix >= 60 ) {
2022-06-13 15:07:59 +05:30
if cur . Type == issues_model . CommentTypeLabel && cur . Label != nil {
2020-11-21 03:59:09 +05:30
if cur . Content != "1" {
cur . RemovedLabels = append ( cur . RemovedLabels , cur . Label )
2020-10-26 03:19:48 +05:30
} else {
2020-11-21 03:59:09 +05:30
cur . AddedLabels = append ( cur . AddedLabels , cur . Label )
2020-10-26 03:19:48 +05:30
}
}
2020-11-21 03:59:09 +05:30
continue
2020-10-26 03:19:48 +05:30
}
2020-11-21 03:59:09 +05:30
2021-03-05 20:47:32 +05:30
if cur . Label != nil { // now cur MUST be label comment
2022-06-13 15:07:59 +05:30
if prev . Type == issues_model . CommentTypeLabel { // we can combine them only prev is a label comment
2021-03-05 20:47:32 +05:30
if cur . Content != "1" {
2021-11-04 20:21:30 +05:30
// remove labels from the AddedLabels list if the label that was removed is already
// in this list, and if it's not in this list, add the label to RemovedLabels
addedAndRemoved := false
for i , label := range prev . AddedLabels {
if cur . Label . ID == label . ID {
prev . AddedLabels = append ( prev . AddedLabels [ : i ] , prev . AddedLabels [ i + 1 : ] ... )
addedAndRemoved = true
break
}
}
if ! addedAndRemoved {
prev . RemovedLabels = append ( prev . RemovedLabels , cur . Label )
}
2021-03-05 20:47:32 +05:30
} else {
2021-11-04 20:21:30 +05:30
// remove labels from the RemovedLabels list if the label that was added is already
// in this list, and if it's not in this list, add the label to AddedLabels
removedAndAdded := false
for i , label := range prev . RemovedLabels {
if cur . Label . ID == label . ID {
prev . RemovedLabels = append ( prev . RemovedLabels [ : i ] , prev . RemovedLabels [ i + 1 : ] ... )
removedAndAdded = true
break
}
}
if ! removedAndAdded {
prev . AddedLabels = append ( prev . AddedLabels , cur . Label )
}
2021-03-05 20:47:32 +05:30
}
prev . CreatedUnix = cur . CreatedUnix
// remove the current comment since it has been combined to prev comment
issue . Comments = append ( issue . Comments [ : i ] , issue . Comments [ i + 1 : ] ... )
i --
} else { // if prev is not a label comment, start a new group
if cur . Content != "1" {
cur . RemovedLabels = append ( cur . RemovedLabels , cur . Label )
} else {
cur . AddedLabels = append ( cur . AddedLabels , cur . Label )
}
2021-02-10 08:20:44 +05:30
}
2020-11-21 03:59:09 +05:30
}
2020-10-26 03:19:48 +05:30
}
}
2020-12-21 21:09:28 +05:30
// get all teams that current user can mention
func handleTeamMentions ( ctx * context . Context ) {
2022-03-22 12:33:22 +05:30
if ctx . Doer == nil || ! ctx . Repo . Owner . IsOrganization ( ) {
2020-12-21 21:09:28 +05:30
return
}
2021-11-19 17:11:40 +05:30
var isAdmin bool
2020-12-21 21:09:28 +05:30
var err error
2022-03-29 11:59:02 +05:30
var teams [ ] * organization . Team
org := organization . OrgFromUser ( ctx . Repo . Owner )
2020-12-21 21:09:28 +05:30
// Admin has super access.
2022-03-22 12:33:22 +05:30
if ctx . Doer . IsAdmin {
2020-12-21 21:09:28 +05:30
isAdmin = true
} else {
2023-10-03 16:00:41 +05:30
isAdmin , err = org . IsOwnedBy ( ctx , ctx . Doer . ID )
2020-12-21 21:09:28 +05:30
if err != nil {
ctx . ServerError ( "IsOwnedBy" , err )
return
}
}
if isAdmin {
2023-10-03 16:00:41 +05:30
teams , err = org . LoadTeams ( ctx )
2021-11-19 17:11:40 +05:30
if err != nil {
2021-08-12 18:13:08 +05:30
ctx . ServerError ( "LoadTeams" , err )
2020-12-21 21:09:28 +05:30
return
}
} else {
2023-10-03 16:00:41 +05:30
teams , err = org . GetUserTeams ( ctx , ctx . Doer . ID )
2020-12-21 21:09:28 +05:30
if err != nil {
ctx . ServerError ( "GetUserTeams" , err )
return
}
}
2021-11-19 17:11:40 +05:30
ctx . Data [ "MentionableTeams" ] = teams
2020-12-21 21:09:28 +05:30
ctx . Data [ "MentionableTeamsOrg" ] = ctx . Repo . Owner . Name
Add context cache as a request level cache (#22294)
To avoid duplicated load of the same data in an HTTP request, we can set
a context cache to do that. i.e. Some pages may load a user from a
database with the same id in different areas on the same page. But the
code is hidden in two different deep logic. How should we share the
user? As a result of this PR, now if both entry functions accept
`context.Context` as the first parameter and we just need to refactor
`GetUserByID` to reuse the user from the context cache. Then it will not
be loaded twice on an HTTP request.
But of course, sometimes we would like to reload an object from the
database, that's why `RemoveContextData` is also exposed.
The core context cache is here. It defines a new context
```go
type cacheContext struct {
ctx context.Context
data map[any]map[any]any
lock sync.RWMutex
}
var cacheContextKey = struct{}{}
func WithCacheContext(ctx context.Context) context.Context {
return context.WithValue(ctx, cacheContextKey, &cacheContext{
ctx: ctx,
data: make(map[any]map[any]any),
})
}
```
Then you can use the below 4 methods to read/write/del the data within
the same context.
```go
func GetContextData(ctx context.Context, tp, key any) any
func SetContextData(ctx context.Context, tp, key, value any)
func RemoveContextData(ctx context.Context, tp, key any)
func GetWithContextCache[T any](ctx context.Context, cacheGroupKey string, cacheTargetID any, f func() (T, error)) (T, error)
```
Then let's take a look at how `system.GetString` implement it.
```go
func GetSetting(ctx context.Context, key string) (string, error) {
return cache.GetWithContextCache(ctx, contextCacheKey, key, func() (string, error) {
return cache.GetString(genSettingCacheKey(key), func() (string, error) {
res, err := GetSettingNoCache(ctx, key)
if err != nil {
return "", err
}
return res.SettingValue, nil
})
})
}
```
First, it will check if context data include the setting object with the
key. If not, it will query from the global cache which may be memory or
a Redis cache. If not, it will get the object from the database. In the
end, if the object gets from the global cache or database, it will be
set into the context cache.
An object stored in the context cache will only be destroyed after the
context disappeared.
2023-02-15 19:07:34 +05:30
ctx . Data [ "MentionableTeamsOrgAvatar" ] = ctx . Repo . Owner . AvatarLink ( ctx )
2020-12-21 21:09:28 +05:30
}
2023-04-07 05:41:02 +05:30
type userSearchInfo struct {
UserID int64 ` json:"user_id" `
UserName string ` json:"username" `
AvatarLink string ` json:"avatar_link" `
FullName string ` json:"full_name" `
}
type userSearchResponse struct {
Results [ ] * userSearchInfo ` json:"results" `
}
// IssuePosters get posters for current repo's issues/pull requests
func IssuePosters ( ctx * context . Context ) {
2023-07-20 18:11:28 +05:30
issuePosters ( ctx , false )
}
func PullPosters ( ctx * context . Context ) {
issuePosters ( ctx , true )
}
func issuePosters ( ctx * context . Context , isPullList bool ) {
2023-04-07 05:41:02 +05:30
repo := ctx . Repo . Repository
search := strings . TrimSpace ( ctx . FormString ( "q" ) )
posters , err := repo_model . GetIssuePostersWithSearch ( ctx , repo , isPullList , search , setting . UI . DefaultShowFullName )
if err != nil {
ctx . JSON ( http . StatusInternalServerError , err )
return
}
if search == "" && ctx . Doer != nil {
// the returned posters slice only contains limited number of users,
// to make the current user (doer) can quickly filter their own issues, always add doer to the posters slice
2023-09-07 15:07:47 +05:30
if ! slices . ContainsFunc ( posters , func ( user * user_model . User ) bool { return user . ID == ctx . Doer . ID } ) {
2023-04-07 05:41:02 +05:30
posters = append ( posters , ctx . Doer )
}
}
2023-08-25 16:37:42 +05:30
posters = MakeSelfOnTop ( ctx . Doer , posters )
2023-04-07 05:41:02 +05:30
resp := & userSearchResponse { }
resp . Results = make ( [ ] * userSearchInfo , len ( posters ) )
for i , user := range posters {
resp . Results [ i ] = & userSearchInfo { UserID : user . ID , UserName : user . Name , AvatarLink : user . AvatarLink ( ctx ) }
if setting . UI . DefaultShowFullName {
resp . Results [ i ] . FullName = user . FullName
}
}
ctx . JSON ( http . StatusOK , resp )
}