# ⚠️ Breaking

Many deprecated queue config options are removed (they should actually have been removed in 1.18/1.19). If you see this fatal message when starting Gitea: "Please update your app.ini to remove deprecated config options", follow the error messages to remove those options from your app.ini. Example:

```
2023/05/06 19:39:22 [E] Removed queue option: `[indexer].ISSUE_INDEXER_QUEUE_TYPE`. Use new options in `[queue.issue_indexer]`
2023/05/06 19:39:22 [E] Removed queue option: `[indexer].UPDATE_BUFFER_LEN`. Use new options in `[queue.issue_indexer]`
2023/05/06 19:39:22 [F] Please update your app.ini to remove deprecated config options
```

Many options in `[queue]` are dropped, including `WRAP_IF_NECESSARY`, `MAX_ATTEMPTS`, `TIMEOUT`, `WORKERS`, `BLOCK_TIMEOUT`, `BOOST_TIMEOUT`, `BOOST_WORKERS`; they can be removed from app.ini.

# The problem

The old queue package has some legacy problems:

* Complexity: few people could tell how it works.
* Maintainability: too many channels and mutexes/conds are mixed together, and too many different structs/interfaces depend on each other.
* Stability: because of the complexity and maintainability problems, there are sometimes strange bugs that are difficult to debug, and some code has no tests (indeed, some code is difficult to test because so many things are mixed together).
* General applicability: although it is called a "queue", its behavior is not that of a well-known queue.
* Scalability: it doesn't seem easy to make it work with a cluster without breaking its behavior.

It came from some very old code written to "avoid breaking" things, but its technical debt is too heavy now. It's a good time to introduce a better "queue" package.

# The new queue package

It keeps the old config options and concepts as much as possible.

* It only contains two major kinds of concepts (sketched in the code after this description):
    * The "base queue": channel, levelqueue, redis.
        * They share the same abstraction and the same interface, and they are tested by the same testing code.
    * The "WorkerPoolQueue": it uses a "base queue" to provide the "worker pool" function and calls the "handler" to process the data in the base queue.
* The new code doesn't do "PushBack".
    * Think about a queue with many workers: "PushBack" can't guarantee the order of re-queued unhandled items, so the new code just does a normal push.
* The new code doesn't do "pause/resume".
    * "Pause/resume" was designed to handle handler failures, e.g. the document indexer (elasticsearch) being down.
    * If a queue is paused for a long time, either the producers block or the new items are dropped.
    * The new code doesn't do such a "pause/resume" trick; it's not a common queue behavior and it doesn't help much.
    * If there are unhandled items, the "push" function just blocks for a few seconds, then re-queues them and retries.
* The new code doesn't do "worker boosting".
    * Gitea's queue handlers are light functions whose only cost is the goroutine, so it doesn't make sense to "boost" them.
    * The new code only uses a "max worker number" to limit the number of concurrent workers.
* The new "Push" never blocks forever.
    * Instead of creating more and more blocked goroutines, returning an error is friendlier to the server and to the end user.

There are more details in the code comments, e.g. the "Flush" problem, the strange "code.index" hanging problem, and the "immediate" queue problem.

Almost ready for review.
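To make the two layers concrete, here is a rough sketch. It is not the exact Gitea API: the interface methods are taken from the redis implementation shown later in this PR, while the handler and worker-pool shapes (`handlerFunc`, `workerPoolQueue`, `runOneWorker`) are simplified, hypothetical names used only for illustration.

```go
// Illustrative sketch only, not the real Gitea API.
package queuesketch

import "context"

// baseQueue is the small abstraction every backend (channel, levelqueue,
// redis) implements, so the same tests can run against all of them.
type baseQueue interface {
	PushItem(ctx context.Context, data []byte) error
	PopItem(ctx context.Context) ([]byte, error)
	HasItem(ctx context.Context, data []byte) (bool, error) // only meaningful for "unique" queues
	Len(ctx context.Context) (int, error)
	Close() error
	RemoveAll(ctx context.Context) error
}

// handlerFunc processes a batch of items and returns those it could not handle;
// unhandled items are simply pushed again (no "PushBack", so no order guarantee).
type handlerFunc[T any] func(items ...T) (unhandled []T)

// workerPoolQueue pops from a base queue and feeds the handler with at most
// maxWorkers concurrent goroutines (no "boost", no "pause/resume").
type workerPoolQueue[T any] struct {
	base       baseQueue
	handle     handlerFunc[T]
	maxWorkers int
}

// runOneWorker is a highly simplified worker loop: pop, decode, handle,
// and re-queue anything the handler reports as unhandled.
func (q *workerPoolQueue[T]) runOneWorker(ctx context.Context, decode func([]byte) T, encode func(T) []byte) {
	for {
		data, err := q.base.PopItem(ctx)
		if err != nil {
			return // e.g. the context was canceled during shutdown
		}
		for _, item := range q.handle(decode(data)) {
			_ = q.base.PushItem(ctx, encode(item)) // a normal push, which may block briefly or return an error
		}
	}
}
```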
TODO:

* [x] add some necessary comments during review
* [x] add some more tests if necessary
* [x] update documents and config options
* [x] test max worker / active worker
* [x] re-run the CI tasks to see whether any test is flaky
* [x] improve the `handleOldLengthConfiguration` to provide more friendly messages
* [x] fine tune default config values (eg: length?)

## Code coverage:

![image](https://user-images.githubusercontent.com/2114189/236620635-55576955-f95d-4810-b12f-879026a3afdf.png)
```go
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package queue

import (
	"context"
	"sync"
	"time"

	"code.gitea.io/gitea/modules/graceful"
	"code.gitea.io/gitea/modules/log"
	"code.gitea.io/gitea/modules/nosql"

	"github.com/redis/go-redis/v9"
)

type baseRedis struct {
	client   redis.UniversalClient
	isUnique bool
	cfg      *BaseConfig

	mu sync.Mutex // the old implementation is not thread-safe, the queue operation and set operation should be protected together
}

var _ baseQueue = (*baseRedis)(nil)

func newBaseRedisGeneric(cfg *BaseConfig, unique bool) (baseQueue, error) {
	client := nosql.GetManager().GetRedisClient(cfg.ConnStr)

	var err error
	for i := 0; i < 10; i++ {
		err = client.Ping(graceful.GetManager().ShutdownContext()).Err()
		if err == nil {
			break
		}
		log.Warn("Redis is not ready, waiting for 1 second to retry: %v", err)
		time.Sleep(time.Second)
	}
	if err != nil {
		return nil, err
	}

	return &baseRedis{cfg: cfg, client: client, isUnique: unique}, nil
}

func newBaseRedisSimple(cfg *BaseConfig) (baseQueue, error) {
	return newBaseRedisGeneric(cfg, false)
}

func newBaseRedisUnique(cfg *BaseConfig) (baseQueue, error) {
	return newBaseRedisGeneric(cfg, true)
}

func (q *baseRedis) PushItem(ctx context.Context, data []byte) error {
	return backoffErr(ctx, backoffBegin, backoffUpper, time.After(pushBlockTime), func() (retry bool, err error) {
		q.mu.Lock()
		defer q.mu.Unlock()

		cnt, err := q.client.LLen(ctx, q.cfg.QueueFullName).Result()
		if err != nil {
			return false, err
		}
		if int(cnt) >= q.cfg.Length {
			return true, nil // the queue is full, back off and retry until pushBlockTime elapses
		}

		if q.isUnique {
			added, err := q.client.SAdd(ctx, q.cfg.SetFullName, data).Result()
			if err != nil {
				return false, err
			}
			if added == 0 {
				return false, ErrAlreadyInQueue
			}
		}
		return false, q.client.RPush(ctx, q.cfg.QueueFullName, data).Err()
	})
}

func (q *baseRedis) PopItem(ctx context.Context) ([]byte, error) {
	return backoffRetErr(ctx, backoffBegin, backoffUpper, infiniteTimerC, func() (retry bool, data []byte, err error) {
		q.mu.Lock()
		defer q.mu.Unlock()

		data, err = q.client.LPop(ctx, q.cfg.QueueFullName).Bytes()
		if err == redis.Nil {
			return true, nil, nil // the queue is empty, back off and retry
		}
		if err != nil {
			return true, nil, nil // treat other errors as transient: swallow them and retry
		}
		if q.isUnique {
			// the data has been popped, even if there is any error we can't do anything
			_ = q.client.SRem(ctx, q.cfg.SetFullName, data).Err()
		}
		return false, data, err
	})
}

func (q *baseRedis) HasItem(ctx context.Context, data []byte) (bool, error) {
	q.mu.Lock()
	defer q.mu.Unlock()
	if !q.isUnique {
		return false, nil
	}
	return q.client.SIsMember(ctx, q.cfg.SetFullName, data).Result()
}

func (q *baseRedis) Len(ctx context.Context) (int, error) {
	q.mu.Lock()
	defer q.mu.Unlock()
	cnt, err := q.client.LLen(ctx, q.cfg.QueueFullName).Result()
	return int(cnt), err
}

func (q *baseRedis) Close() error {
	q.mu.Lock()
	defer q.mu.Unlock()
	return q.client.Close()
}

func (q *baseRedis) RemoveAll(ctx context.Context) error {
	q.mu.Lock()
	defer q.mu.Unlock()
	c1 := q.client.Del(ctx, q.cfg.QueueFullName)
	c2 := q.client.Del(ctx, q.cfg.SetFullName)
	if c1.Err() != nil {
		return c1.Err()
	}
	if c2.Err() != nil {
		return c2.Err()
	}
	return nil // actually, checking errors doesn't make sense here because the state could be out-of-sync
}
```
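For context, here is a rough sketch of how these base-queue methods could be exercised from inside the queue package (roughly what its tests would do). The `BaseConfig` values and the connection string are made-up examples, not defaults from this PR, and `exampleBaseRedisRoundTrip` is a hypothetical helper name.

```go
// Hypothetical usage inside package queue; values are illustrative only.
func exampleBaseRedisRoundTrip(ctx context.Context) error {
	q, err := newBaseRedisUnique(&BaseConfig{
		ConnStr:       "redis://127.0.0.1:6379/0", // assumed connection-string format
		QueueFullName: "example_queue",
		SetFullName:   "example_queue_unique",
		Length:        100, // PushItem backs off while LLEN >= Length
	})
	if err != nil {
		return err
	}
	defer q.Close()

	if err = q.PushItem(ctx, []byte("payload")); err != nil {
		return err // a duplicate on a unique queue yields ErrAlreadyInQueue
	}

	has, _ := q.HasItem(ctx, []byte("payload")) // true until the item is popped
	_ = has

	data, err := q.PopItem(ctx) // blocks (with backoff) until an item is available
	if err != nil {
		return err
	}
	_ = data // hand the payload to the worker pool / handler
	return nil
}
```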