Commit 6f9c278559
# ⚠️ Breaking

Many deprecated queue config options are removed (actually, they should have been removed in 1.18/1.19). If you see the fatal message when starting Gitea: "Please update your app.ini to remove deprecated config options", please follow the error messages to remove these options from your app.ini. Example:

```
2023/05/06 19:39:22 [E] Removed queue option: `[indexer].ISSUE_INDEXER_QUEUE_TYPE`. Use new options in `[queue.issue_indexer]`
2023/05/06 19:39:22 [E] Removed queue option: `[indexer].UPDATE_BUFFER_LEN`. Use new options in `[queue.issue_indexer]`
2023/05/06 19:39:22 [F] Please update your app.ini to remove deprecated config options
```

Many options in `[queue]` are dropped, including `WRAP_IF_NECESSARY`, `MAX_ATTEMPTS`, `TIMEOUT`, `WORKERS`, `BLOCK_TIMEOUT`, `BOOST_TIMEOUT` and `BOOST_WORKERS`; they can be removed from app.ini.

# The problem

The old queue package has some legacy problems:

* Complexity: I doubt that many people could tell how it works.
* Maintainability: too many channels and mutexes/conds are mixed together, and too many different structs/interfaces depend on each other.
* Stability: because of the complexity and maintainability problems, there are sometimes strange bugs that are hard to debug, and some code has no tests (indeed, some code is difficult to test because so many things are mixed together).
* General applicability: although it is called a "queue", it does not behave like a well-known queue.
* Scalability: it doesn't seem easy to make it work with a cluster without breaking its behaviors.

It came from some very old code that was kept around to "avoid breaking" things, but its technical debt is too heavy now. It's a good time to introduce a better "queue" package.

# The new queue package

It keeps the old config options and concepts as much as possible.

* It only contains two major kinds of concepts:
    * The "base queue": channel, levelqueue, redis.
        * They share the same abstraction and the same interface, and they are tested by the same testing code.
    * The "WorkerPoolQueue": it uses the "base queue" to provide the "worker pool" function and calls the "handler" to process the data in the base queue.
* The new code doesn't do "PushBack".
    * Think about a queue with many workers: "PushBack" can't guarantee the order of re-queued unhandled items, so the new code just does a normal push.
* The new code doesn't do "pause/resume".
    * The "pause/resume" was designed to handle handler failures, eg: the document indexer (elasticsearch) is down.
    * If a queue is paused for a long time, either the producers block or the new items are dropped.
    * The new code doesn't do such a "pause/resume" trick; it's not a common queue behavior and it doesn't help much.
    * If there are unhandled items, the worker just blocks for a few seconds, then re-queues them with a normal push and retries.
* The new code doesn't do "worker booster".
    * Gitea's queue handlers are lightweight functions; the only cost is the goroutine, so it doesn't make sense to "boost" them.
    * The new code only uses a "max worker number" to limit the concurrent workers.
* The new "Push" never blocks forever.
    * Instead of creating more and more blocking goroutines, returning an error is friendlier to the server and to the end user.

There are more details in code comments: eg: the "Flush" problem, the strange "code.index" hanging problem, the "immediate" queue problem.

Almost ready for review.
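To make the handler contract above concrete, here is a small self-contained sketch. The `toyQueue`, `newToyQueue` and `handleOnce` names are made up for illustration and are not the real `WorkerPoolQueue` API; only the overall shape follows the description: a batch handler that returns the items it could not handle, a `Push` that returns an error instead of blocking forever, and a normal push (not a "PushBack") for re-queued items.

```go
package main

import "fmt"

// handlerFunc mirrors the shape described above: a batch of items in, the unhandled ones out.
type handlerFunc[T any] func(items ...T) (unhandled []T)

// toyQueue is a hypothetical stand-in for the real WorkerPoolQueue, only for illustration.
type toyQueue[T any] struct {
	items   chan T
	handler handlerFunc[T]
}

func newToyQueue[T any](capacity int, h handlerFunc[T]) *toyQueue[T] {
	return &toyQueue[T]{items: make(chan T, capacity), handler: h}
}

// Push never blocks forever: if the queue is full, it returns an error instead.
func (q *toyQueue[T]) Push(item T) error {
	select {
	case q.items <- item:
		return nil
	default:
		return fmt.Errorf("queue is full")
	}
}

// handleOnce collects up to batchLength items, calls the handler, and re-pushes
// whatever the handler could not process (a normal push, order is not guaranteed).
func (q *toyQueue[T]) handleOnce(batchLength int) {
	var batch []T
collect:
	for len(batch) < batchLength {
		select {
		case it := <-q.items:
			batch = append(batch, it)
		default:
			break collect
		}
	}
	for _, item := range q.handler(batch...) {
		_ = q.Push(item) // re-queue unhandled items with a normal push
	}
}

func main() {
	q := newToyQueue[string](100, func(items ...string) (unhandled []string) {
		for _, it := range items {
			if it == "flaky" { // pretend this item failed, eg: the indexer is down
				unhandled = append(unhandled, it)
				continue
			}
			fmt.Println("handled:", it)
		}
		return unhandled
	})
	_ = q.Push("a")
	_ = q.Push("flaky")
	_ = q.Push("b")
	q.handleOnce(20)
}
```

In the real package, the batching, the worker start/stop logic and the back-off for unhandled items are implemented by the worker group code shown below.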
TODO:

* [x] add some necessary comments during review
* [x] add some more tests if necessary
* [x] update documents and config options
* [x] test max worker / active worker
* [x] re-run the CI tasks to see whether any test is flaky
* [x] improve the `handleOldLengthConfiguration` to provide more friendly messages
* [x] fine tune default config values (eg: length?)

## Code coverage:

![image](https://user-images.githubusercontent.com/2114189/236620635-55576955-f95d-4810-b12f-879026a3afdf.png)
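For the breaking change above, a migrated app.ini might look roughly like the sketch below. The option names (`TYPE`, `DATADIR`, `LENGTH`, `BATCH_LENGTH`, `MAX_WORKERS`, `CONN_STR`) are existing `[queue]` settings, but the values here are placeholders only; check the config cheat sheet for your version before copying anything.

```ini
; illustrative values only, adjust to your deployment
[queue]
TYPE = level
DATADIR = queues/common
LENGTH = 100000
BATCH_LENGTH = 20
MAX_WORKERS = 10

; per-queue sections such as [queue.issue_indexer] override the defaults above
; and replace the removed [indexer].ISSUE_INDEXER_QUEUE_* options
[queue.issue_indexer]
TYPE = redis
CONN_STR = redis://127.0.0.1:6379/0
```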
331 lines · 9.6 KiB · Go
// Copyright 2023 The Gitea Authors. All rights reserved.
// SPDX-License-Identifier: MIT

package queue

import (
	"context"
	"sync"
	"sync/atomic"
	"time"

	"code.gitea.io/gitea/modules/log"
)

var (
	infiniteTimerC        = make(chan time.Time) // never fires, used to "disable" the batch debounce timer in doRun
	batchDebounceDuration = 100 * time.Millisecond
	workerIdleDuration    = 1 * time.Second

	unhandledItemRequeueDuration atomic.Int64 // to avoid data race during test
)

func init() {
	unhandledItemRequeueDuration.Store(int64(5 * time.Second))
}

// workerGroup is a group of workers to work with a WorkerPoolQueue
type workerGroup[T any] struct {
	q  *WorkerPoolQueue[T]
	wg sync.WaitGroup

	ctxWorker       context.Context
	ctxWorkerCancel context.CancelFunc

	batchBuffer []T
	popItemChan chan []byte
	popItemErr  chan error
}

func (wg *workerGroup[T]) doPrepareWorkerContext() {
	wg.ctxWorker, wg.ctxWorkerCancel = context.WithCancel(wg.q.ctxRun)
}

// doDispatchBatchToWorker dispatches a batch of items to worker's channel.
// If the channel is full, it tries to start a new worker if possible.
func (q *WorkerPoolQueue[T]) doDispatchBatchToWorker(wg *workerGroup[T], flushChan chan flushType) {
	batch := wg.batchBuffer
	wg.batchBuffer = nil

	if len(batch) == 0 {
		return
	}

	full := false
	select {
	case q.batchChan <- batch:
	default:
		full = true
	}

	q.workerNumMu.Lock()
	noWorker := q.workerNum == 0
	if full || noWorker {
		if q.workerNum < q.workerMaxNum || (noWorker && q.workerMaxNum <= 0) {
			q.workerNum++
			q.doStartNewWorker(wg)
		}
	}
	q.workerNumMu.Unlock()

	if full {
		select {
		case q.batchChan <- batch:
		case flush := <-flushChan:
			q.doWorkerHandle(batch)
			q.doFlush(wg, flush)
		case <-q.ctxRun.Done():
			wg.batchBuffer = batch // return the batch to buffer, the "doRun" function will handle it
		}
	}
}

// doWorkerHandle calls the safeHandler to handle a batch of items, and it increases/decreases the active worker number.
// If the context has been canceled, it should not be called, because the "Push" still needs the context; in such a case, call q.safeHandler directly.
func (q *WorkerPoolQueue[T]) doWorkerHandle(batch []T) {
	q.workerNumMu.Lock()
	q.workerActiveNum++
	q.workerNumMu.Unlock()

	defer func() {
		q.workerNumMu.Lock()
		q.workerActiveNum--
		q.workerNumMu.Unlock()
	}()

	unhandled := q.safeHandler(batch...)
	// if none of the items were handled, back off for a few seconds,
	// because the handler (eg: document indexer) may have encountered some errors/failures
	if len(unhandled) == len(batch) && unhandledItemRequeueDuration.Load() != 0 {
		log.Error("Queue %q failed to handle batch of %d items, backoff for a few seconds", q.GetName(), len(batch))
		select {
		case <-q.ctxRun.Done():
		case <-time.After(time.Duration(unhandledItemRequeueDuration.Load())):
		}
	}
	for _, item := range unhandled {
		if err := q.Push(item); err != nil {
			if !q.basePushForShutdown(item) {
				log.Error("Failed to requeue item for queue %q when calling handler: %v", q.GetName(), err)
			}
		}
	}
}

// basePushForShutdown tries to requeue items into the base queue when the WorkerPoolQueue is shutting down.
// If the queue is shutting down, it returns true and tries to push the items.
// Otherwise it does nothing and returns false.
func (q *WorkerPoolQueue[T]) basePushForShutdown(items ...T) bool {
	ctxShutdown := q.ctxShutdown.Load()
	if ctxShutdown == nil {
		return false
	}
	for _, item := range items {
		// if pushing into the base queue still fails, the queue can do nothing but log the error and lose the item
		if err := q.baseQueue.PushItem(*ctxShutdown, q.marshal(item)); err != nil {
			log.Error("Failed to requeue item for queue %q when shutting down: %v", q.GetName(), err)
		}
	}
	return true
}

// doStartNewWorker starts a new worker for the queue; the worker reads from the worker's channel and handles the items.
func (q *WorkerPoolQueue[T]) doStartNewWorker(wp *workerGroup[T]) {
	wp.wg.Add(1)

	go func() {
		defer wp.wg.Done()

		log.Debug("Queue %q starts new worker", q.GetName())
		defer log.Debug("Queue %q stops idle worker", q.GetName())

		t := time.NewTicker(workerIdleDuration)
		keepWorking := true
		stopWorking := func() {
			q.workerNumMu.Lock()
			keepWorking = false
			q.workerNum--
			q.workerNumMu.Unlock()
		}
		for keepWorking {
			select {
			case <-wp.ctxWorker.Done():
				stopWorking()
			case batch, ok := <-q.batchChan:
				if !ok {
					stopWorking()
				} else {
					q.doWorkerHandle(batch)
					t.Reset(workerIdleDuration)
				}
			case <-t.C:
				// the worker has been idle for a while: stop it unless it is the last remaining worker
				q.workerNumMu.Lock()
				keepWorking = q.workerNum <= 1
				if !keepWorking {
					q.workerNum--
				}
				q.workerNumMu.Unlock()
			}
		}
	}()
}

// doFlush flushes the queue: it tries to read all items from the queue and handles them.
// It is for testing purposes only. It's not designed to work for a cluster.
func (q *WorkerPoolQueue[T]) doFlush(wg *workerGroup[T], flush flushType) {
	log.Debug("Queue %q starts flushing", q.GetName())
	defer log.Debug("Queue %q finishes flushing", q.GetName())

	// stop all workers, and prepare a new worker context to start new workers

	wg.ctxWorkerCancel()
	wg.wg.Wait()

	defer func() {
		close(flush)
		wg.doPrepareWorkerContext()
	}()

	// drain the batch channel first
loop:
	for {
		select {
		case batch := <-q.batchChan:
			q.doWorkerHandle(batch)
		default:
			break loop
		}
	}

	// drain the popItem channel
	emptyCounter := 0
	for {
		select {
		case data, dataOk := <-wg.popItemChan:
			if !dataOk {
				return
			}
			emptyCounter = 0
			if v, jsonOk := q.unmarshal(data); !jsonOk {
				continue
			} else {
				q.doWorkerHandle([]T{v})
			}
		case err := <-wg.popItemErr:
			if !q.isCtxRunCanceled() {
				log.Error("Failed to pop item from queue %q (doFlush): %v", q.GetName(), err)
			}
			return
		case <-q.ctxRun.Done():
			log.Debug("Queue %q is shutting down", q.GetName())
			return
		case <-time.After(20 * time.Millisecond):
			// There is no reliable way to make sure all queue items are consumed by the Flush; there might always be some items stored in buffers or temporary variables.
			// If we run Gitea in a cluster, we cannot even guarantee that all items are consumed by a deterministic instance.
			// Luckily, the "Flush" trick is only used in tests, so far so good.
			if cnt, _ := q.baseQueue.Len(q.ctxRun); cnt == 0 && len(wg.popItemChan) == 0 {
				emptyCounter++
			}
			if emptyCounter >= 2 {
				return
			}
		}
	}
}

func (q *WorkerPoolQueue[T]) isCtxRunCanceled() bool {
	select {
	case <-q.ctxRun.Done():
		return true
	default:
		return false
	}
}

var skipFlushChan = make(chan flushType) // an empty flush chan, used to skip reading other flush requests

// doRun is the main loop of the queue. All related "doXxx" functions are executed in its context.
func (q *WorkerPoolQueue[T]) doRun() {
	log.Debug("Queue %q starts running", q.GetName())
	defer log.Debug("Queue %q stops running", q.GetName())

	wg := &workerGroup[T]{q: q}
	wg.doPrepareWorkerContext()
	wg.popItemChan, wg.popItemErr = popItemByChan(q.ctxRun, q.baseQueue.PopItem)

	defer func() {
		q.ctxRunCancel()

		// drain all data on the fly
		// since the queue is shutting down, the items can't be dispatched to workers because the context is canceled
		// it can't call doWorkerHandle either, because there is no chance to push unhandled items back to the queue
		var unhandled []T
		close(q.batchChan)
		for batch := range q.batchChan {
			unhandled = append(unhandled, batch...)
		}
		unhandled = append(unhandled, wg.batchBuffer...)
		for data := range wg.popItemChan {
			if v, ok := q.unmarshal(data); ok {
				unhandled = append(unhandled, v)
			}
		}

		ctxShutdownPtr := q.ctxShutdown.Load()
		if ctxShutdownPtr != nil {
			// if there is a shutdown context, try to push the items back to the base queue
			q.basePushForShutdown(unhandled...)
			workerDone := make(chan struct{})
			// this is the only way to wait for the workers, because the handlers do not have a context to wait on
			go func() { wg.wg.Wait(); close(workerDone) }()
			select {
			case <-workerDone:
			case <-(*ctxShutdownPtr).Done():
				log.Error("Queue %q is shutting down, but workers are still running after timeout", q.GetName())
			}
		} else {
			// if there is no shutdown context, just call the handler to try to handle the items; if the handler fails again, the items are lost
			q.safeHandler(unhandled...)
		}

		close(q.shutdownDone)
	}()

	var batchDispatchC <-chan time.Time = infiniteTimerC
	for {
		select {
		case data, dataOk := <-wg.popItemChan:
			if !dataOk {
				return
			}
			if v, jsonOk := q.unmarshal(data); !jsonOk {
				testRecorder.Record("pop:corrupted:%s", data) // in rare cases the levelqueue (leveldb) might be corrupted
				continue
			} else {
				wg.batchBuffer = append(wg.batchBuffer, v)
			}
			if len(wg.batchBuffer) >= q.batchLength {
				q.doDispatchBatchToWorker(wg, q.flushChan)
			} else if batchDispatchC == infiniteTimerC {
				batchDispatchC = time.After(batchDebounceDuration)
			} // else: batchDispatchC is already a debounce timer, it will be triggered soon
		case <-batchDispatchC:
			batchDispatchC = infiniteTimerC
			q.doDispatchBatchToWorker(wg, q.flushChan)
		case flush := <-q.flushChan:
			// before flushing, it needs to try to dispatch the batch to a worker first, in case there is no worker running
			// after that, there is at least one worker running, so "doFlush" can wait for the workers to finish
			// since we are already in a "flush" operation, the dispatching function shouldn't read the flush chan
			q.doDispatchBatchToWorker(wg, skipFlushChan)
			q.doFlush(wg, flush)
		case err := <-wg.popItemErr:
			if !q.isCtxRunCanceled() {
				log.Error("Failed to pop item from queue %q (doRun): %v", q.GetName(), err)
			}
			return
		case <-q.ctxRun.Done():
			log.Debug("Queue %q is shutting down", q.GetName())
			return
		}
	}
}