AdGuardHome/internal/querylog/querylogfile.go

package querylog

import (
	"bytes"
	"encoding/json"
	"fmt"
	"os"
	"time"

	"github.com/AdguardTeam/golibs/errors"
	"github.com/AdguardTeam/golibs/log"
)
// flushLogBuffer flushes the current buffer to file and resets the current
// buffer.
func (l *queryLog) flushLogBuffer(fullFlush bool) error {
	if !l.conf.FileEnabled {
		return nil
	}

	l.fileFlushLock.Lock()
	defer l.fileFlushLock.Unlock()

	// Flush the remainder to file.
	l.bufferLock.Lock()
	needFlush := len(l.buffer) >= int(l.conf.MemSize)
	if !needFlush && !fullFlush {
		l.bufferLock.Unlock()

		return nil
	}

	flushBuffer := l.buffer
	l.buffer = nil
	l.flushPending = false
	l.bufferLock.Unlock()

	err := l.flushToFile(flushBuffer)
	if err != nil {
		log.Error("Saving querylog to file failed: %s", err)

		return err
	}

	return nil
}
// flushToFile saves the specified log entries to the query log file.
func (l *queryLog) flushToFile(buffer []*logEntry) (err error) {
	if len(buffer) == 0 {
		log.Debug("querylog: there's nothing to write to a file")

		return nil
	}

	start := time.Now()

	var b bytes.Buffer
	e := json.NewEncoder(&b)
	for _, entry := range buffer {
		err = e.Encode(entry)
		if err != nil {
			log.Error("Failed to marshal entry: %s", err)

			return err
		}
	}

	elapsed := time.Since(start)
	log.Debug(
		"%d elements serialized via json in %v: %d kB, %v/entry, %v/entry",
		len(buffer),
		elapsed,
		b.Len()/1024,
		float64(b.Len())/float64(len(buffer)),
		elapsed/time.Duration(len(buffer)),
	)
	var zb bytes.Buffer
	filename := l.logFile
	zb = b

	l.fileWriteLock.Lock()
	defer l.fileWriteLock.Unlock()

	f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0o644)
	if err != nil {
		log.Error("failed to create file \"%s\": %s", filename, err)

		return err
	}
	defer func() { err = errors.WithDeferred(err, f.Close()) }()

	n, err := f.Write(zb.Bytes())
	if err != nil {
		log.Error("Couldn't write to file: %s", err)

		return err
	}

	log.Debug("querylog: ok \"%s\": %v bytes written", filename, n)

	return nil
}
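// rotate renames the current log file by appending the ".1" suffix to it, so
// that a new log file is started on the next write.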
func (l *queryLog) rotate() error {
	from := l.logFile
	to := l.logFile + ".1"

	err := os.Rename(from, to)
	if err != nil {
		if errors.Is(err, os.ErrNotExist) {
			log.Debug("querylog: no log to rotate")

			return nil
		}

		return fmt.Errorf("failed to rename old file: %w", err)
	}

	log.Debug("querylog: renamed %s into %s", from, to)

	return nil
}
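// readFileFirstTimeValue returns the timestamp of the oldest entry in the
// current log file, read from the "T" field of its first record.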
func (l *queryLog) readFileFirstTimeValue() (first time.Time, err error) {
	var f *os.File
	f, err = os.Open(l.logFile)
	if err != nil {
		return time.Time{}, err
	}
	defer func() { err = errors.WithDeferred(err, f.Close()) }()

	buf := make([]byte, 512)
	var r int
	r, err = f.Read(buf)
	if err != nil {
		return time.Time{}, err
	}

	val := readJSONValue(string(buf[:r]), `"T":"`)
	t, err := time.Parse(time.RFC3339Nano, val)
	if err != nil {
		return time.Time{}, err
	}

	log.Debug("querylog: the oldest log entry: %s", val)

	return t, nil
}
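// periodicRotate checks the need for rotation every rotationCheckIvl and
// rotates the log files when necessary.  It is intended to be used as a
// goroutine.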
func (l *queryLog) periodicRotate() {
	defer log.OnPanic("querylog: rotating")

	l.checkAndRotate()

	// rotationCheckIvl is the period of time between checking the need for
	// rotating log files.  It's smaller than any available rotation interval
	// to increase time accuracy.
	//
	// See https://github.com/AdguardTeam/AdGuardHome/issues/3823.
	const rotationCheckIvl = 1 * time.Hour

	rotations := time.NewTicker(rotationCheckIvl)
	defer rotations.Stop()

	for range rotations.C {
		l.checkAndRotate()
	}
}
// checkAndRotate rotates log files if those are older than the specified
// rotation interval.
func (l *queryLog) checkAndRotate() {
	l.lock.Lock()
	defer l.lock.Unlock()

	oldest, err := l.readFileFirstTimeValue()
	if err != nil && !errors.Is(err, os.ErrNotExist) {
		log.Error("querylog: reading oldest record for rotation: %s", err)

		return
	}

	if rot, now := oldest.Add(l.conf.RotationIvl), time.Now(); rot.After(now) {
		log.Debug(
			"querylog: %s <= %s, not rotating",
			now.Format(time.RFC3339),
			rot.Format(time.RFC3339),
		)

		return
	}

	err = l.rotate()
	if err != nil {
		log.Error("querylog: rotating: %s", err)

		return
	}

	log.Debug("querylog: rotated successfully")
}