2019-08-22 16:34:58 +03:00
|
|
|
package stats
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"encoding/binary"
|
|
|
|
"encoding/gob"
|
|
|
|
"fmt"
|
|
|
|
"time"
|
|
|
|
|
2021-05-24 17:28:11 +03:00
|
|
|
"github.com/AdguardTeam/golibs/errors"
|
2019-08-22 16:34:58 +03:00
|
|
|
"github.com/AdguardTeam/golibs/log"
|
2023-02-13 18:15:33 +03:00
|
|
|
"github.com/AdguardTeam/golibs/stringutil"
|
2022-08-04 19:05:28 +03:00
|
|
|
"go.etcd.io/bbolt"
|
2023-02-21 16:38:22 +03:00
|
|
|
"golang.org/x/exp/slices"
|
2019-08-22 16:34:58 +03:00
|
|
|
)
|
|
|
|
|
2021-02-11 17:55:37 +03:00
|
|
|
// TODO(a.garipov): Rewrite all of this. Add proper error handling and
|
|
|
|
// inspection. Improve logging. Decrease complexity.
|
|
|
|
|
2019-08-22 16:34:58 +03:00
|
|
|
const (
	// maxDomains is the max number of top domains to return in a response;
	// see convertMapToSlice callers.
	maxDomains = 100

	// maxClients is the max number of top clients to return in a response.
	maxClients = 100
)
|
|
|
|
|
2022-08-17 14:09:13 +03:00
|
|
|
// UnitIDGenFunc is the signature of a function that generates a unique ID for
// the statistics unit.  The default implementation is newUnitID, which
// generates an ID from the current hour.
type UnitIDGenFunc func() (id uint32)
|
|
|
|
|
|
|
|
// TimeUnit is the unit of measuring time while aggregating the statistics.
type TimeUnit int

// Supported TimeUnit values.
const (
	// Hours aggregates the statistics per hour.
	Hours TimeUnit = iota

	// Days aggregates the statistics per day.
	Days
)
|
|
|
|
|
|
|
|
// Result is the resulting code of processing the DNS request.
type Result int

// Supported Result values.
//
// NOTE: The constants start at 1 so that the zero value stays unused.
//
// TODO(e.burkov): Think about better naming.
const (
	RNotFiltered Result = iota + 1
	RFiltered
	RSafeBrowsing
	RSafeSearch
	RParental

	// resultLast is used as the length of per-result counter slices; see
	// unit.nResult and unitDB.NResult.
	resultLast = RParental + 1
)
|
|
|
|
|
|
|
|
// Entry is a statistics data entry.
type Entry struct {
	// Client is the client's primary ID.
	//
	// TODO(a.garipov): Make this a {net.IP, string} enum?
	Client string

	// Domain is the domain name requested.
	Domain string

	// Result is the result of processing the request.
	Result Result

	// Time is the duration of the request processing in milliseconds.
	Time uint32
}
|
|
|
|
|
2022-08-04 19:05:28 +03:00
|
|
|
// unit collects the statistics data for a specific period of time.
type unit struct {
	// domains stores the number of requests for each domain.
	domains map[string]uint64

	// blockedDomains stores the number of requests for each domain that has
	// been blocked.
	blockedDomains map[string]uint64

	// clients stores the number of requests from each client.
	clients map[string]uint64

	// nResult stores the number of requests grouped by its result, indexed by
	// the Result values; its length is resultLast.
	nResult []uint64

	// id is the unique unit's identifier.  It's set to an absolute hour number
	// since the beginning of UNIX time by the default ID generating function.
	//
	// Must not be rewritten after creating to be accessed concurrently without
	// using mu.
	id uint32

	// nTotal stores the total number of requests.
	nTotal uint64

	// timeSum stores the sum of processing time in milliseconds of each request
	// written by the unit.
	timeSum uint64
}
|
|
|
|
|
2022-08-17 14:09:13 +03:00
|
|
|
// newUnit allocates the new *unit.
|
|
|
|
func newUnit(id uint32) (u *unit) {
|
|
|
|
return &unit{
|
2023-04-05 15:50:14 +03:00
|
|
|
domains: map[string]uint64{},
|
|
|
|
blockedDomains: map[string]uint64{},
|
|
|
|
clients: map[string]uint64{},
|
2022-08-17 14:09:13 +03:00
|
|
|
nResult: make([]uint64, resultLast),
|
2023-04-05 15:50:14 +03:00
|
|
|
id: id,
|
2022-08-17 14:09:13 +03:00
|
|
|
}
|
2022-08-04 19:05:28 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
// countPair is a single name-number pair for serializing statistics data to
// and from the database.
type countPair struct {
	// Name is the key: a domain name or a client's ID.
	Name string

	// Count is the number of requests accounted for Name.
	Count uint64
}
|
|
|
|
|
2022-08-17 14:09:13 +03:00
|
|
|
// unitDB is the structure for serializing statistics data into the database.
//
// NOTE: Do not change the names or types of fields, as this structure is used
// for GOB encoding.
type unitDB struct {
	// NResult is the number of requests by the result's kind, indexed by the
	// Result values.
	NResult []uint64

	// Domains is the number of requests for each domain name.
	Domains []countPair

	// BlockedDomains is the number of requests blocked for each domain name.
	BlockedDomains []countPair

	// Clients is the number of requests from each client.
	Clients []countPair

	// NTotal is the total number of requests.
	NTotal uint64

	// TimeAvg is the average of processing times in milliseconds of all the
	// requests in the unit.
	TimeAvg uint32
}
|
|
|
|
|
2022-08-04 19:05:28 +03:00
|
|
|
// newUnitID is the default UnitIDGenFunc that generates the unique id hourly.
// The ID is the number of complete hours since the UNIX epoch.
func newUnitID() (id uint32) {
	// The number of seconds in an hour.
	const hourSecs = 60 * 60

	return uint32(time.Now().Unix() / hourSecs)
}
|
|
|
|
|
2022-08-17 14:09:13 +03:00
|
|
|
func finishTxn(tx *bbolt.Tx, commit bool) (err error) {
|
|
|
|
if commit {
|
|
|
|
err = errors.Annotate(tx.Commit(), "committing: %w")
|
|
|
|
} else {
|
|
|
|
err = errors.Annotate(tx.Rollback(), "rolling back: %w")
|
2019-08-22 16:34:58 +03:00
|
|
|
}
|
2022-08-04 19:05:28 +03:00
|
|
|
|
2022-08-17 14:09:13 +03:00
|
|
|
return err
|
2019-08-22 16:34:58 +03:00
|
|
|
}
|
|
|
|
|
2021-08-27 14:50:37 +03:00
|
|
|
// bucketNameLen is the length of a bucket name: a big-endian 64-bit unsigned
// integer.
//
// TODO(a.garipov): Find out why a 64-bit integer is used when IDs seem to
// always be 32 bits.
const bucketNameLen = 8

// idToUnitName converts a numerical ID into a database unit name.
func idToUnitName(id uint32) (name []byte) {
	name = make([]byte, bucketNameLen)
	binary.BigEndian.PutUint64(name, uint64(id))

	return name
}

// unitNameToID converts a database unit name into a numerical ID.  ok is
// false if name is not a valid database unit name.
func unitNameToID(name []byte) (id uint32, ok bool) {
	if len(name) >= bucketNameLen {
		return uint32(binary.BigEndian.Uint64(name)), true
	}

	return 0, false
}
|
|
|
|
|
2022-08-17 14:09:13 +03:00
|
|
|
func convertMapToSlice(m map[string]uint64, max int) (s []countPair) {
|
|
|
|
s = make([]countPair, 0, len(m))
|
|
|
|
for k, v := range m {
|
|
|
|
s = append(s, countPair{Name: k, Count: v})
|
2019-08-22 16:34:58 +03:00
|
|
|
}
|
2021-01-27 18:32:13 +03:00
|
|
|
|
2023-02-21 16:38:22 +03:00
|
|
|
slices.SortFunc(s, func(a, b countPair) (sortsBefore bool) {
|
2023-02-22 12:13:07 +03:00
|
|
|
return a.Count > b.Count
|
2022-08-17 14:09:13 +03:00
|
|
|
})
|
|
|
|
if max > len(s) {
|
|
|
|
max = len(s)
|
2019-08-22 16:34:58 +03:00
|
|
|
}
|
2021-02-11 17:55:37 +03:00
|
|
|
|
2022-08-17 14:09:13 +03:00
|
|
|
return s[:max]
|
2019-08-22 16:34:58 +03:00
|
|
|
}
|
|
|
|
|
2022-08-17 14:09:13 +03:00
|
|
|
func convertSliceToMap(a []countPair) (m map[string]uint64) {
|
|
|
|
m = map[string]uint64{}
|
2019-08-22 16:34:58 +03:00
|
|
|
for _, it := range a {
|
2019-09-10 17:59:10 +03:00
|
|
|
m[it.Name] = it.Count
|
2019-08-22 16:34:58 +03:00
|
|
|
}
|
2022-08-17 14:09:13 +03:00
|
|
|
|
2019-08-22 16:34:58 +03:00
|
|
|
return m
|
|
|
|
}
|
|
|
|
|
2022-08-17 14:09:13 +03:00
|
|
|
// serialize converts u to the *unitDB. It's safe for concurrent use. u must
|
|
|
|
// not be nil.
|
2022-08-04 19:05:28 +03:00
|
|
|
func (u *unit) serialize() (udb *unitDB) {
|
|
|
|
var timeAvg uint32 = 0
|
2019-08-22 16:34:58 +03:00
|
|
|
if u.nTotal != 0 {
|
2022-08-04 19:05:28 +03:00
|
|
|
timeAvg = uint32(u.timeSum / u.nTotal)
|
2019-08-22 16:34:58 +03:00
|
|
|
}
|
2020-11-06 12:15:08 +03:00
|
|
|
|
2022-08-04 19:05:28 +03:00
|
|
|
return &unitDB{
|
|
|
|
NTotal: u.nTotal,
|
|
|
|
NResult: append([]uint64{}, u.nResult...),
|
|
|
|
Domains: convertMapToSlice(u.domains, maxDomains),
|
|
|
|
BlockedDomains: convertMapToSlice(u.blockedDomains, maxDomains),
|
|
|
|
Clients: convertMapToSlice(u.clients, maxClients),
|
|
|
|
TimeAvg: timeAvg,
|
|
|
|
}
|
2019-08-22 16:34:58 +03:00
|
|
|
}
|
|
|
|
|
2022-08-17 14:09:13 +03:00
|
|
|
func loadUnitFromDB(tx *bbolt.Tx, id uint32) (udb *unitDB) {
|
|
|
|
bkt := tx.Bucket(idToUnitName(id))
|
|
|
|
if bkt == nil {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
log.Tracef("Loading unit %d", id)
|
|
|
|
|
|
|
|
var buf bytes.Buffer
|
|
|
|
buf.Write(bkt.Get([]byte{0}))
|
|
|
|
udb = &unitDB{}
|
|
|
|
|
|
|
|
err := gob.NewDecoder(&buf).Decode(udb)
|
|
|
|
if err != nil {
|
|
|
|
log.Error("gob Decode: %s", err)
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
return udb
|
|
|
|
}
|
|
|
|
|
2022-08-04 19:05:28 +03:00
|
|
|
// deserealize assigns the appropriate values from udb to u. u must not be nil.
|
|
|
|
// It's safe for concurrent use.
|
|
|
|
func (u *unit) deserialize(udb *unitDB) {
|
|
|
|
if udb == nil {
|
|
|
|
return
|
2019-08-22 16:34:58 +03:00
|
|
|
}
|
2019-09-10 18:04:43 +03:00
|
|
|
|
2022-08-04 19:05:28 +03:00
|
|
|
u.nTotal = udb.NTotal
|
|
|
|
u.nResult = make([]uint64, resultLast)
|
|
|
|
copy(u.nResult, udb.NResult)
|
2021-01-27 18:32:13 +03:00
|
|
|
u.domains = convertSliceToMap(udb.Domains)
|
|
|
|
u.blockedDomains = convertSliceToMap(udb.BlockedDomains)
|
|
|
|
u.clients = convertSliceToMap(udb.Clients)
|
2022-08-04 19:05:28 +03:00
|
|
|
u.timeSum = uint64(udb.TimeAvg) * udb.NTotal
|
2019-08-22 16:34:58 +03:00
|
|
|
}
|
|
|
|
|
2022-08-17 14:09:13 +03:00
|
|
|
// add adds new data to u. It's safe for concurrent use.
|
|
|
|
func (u *unit) add(res Result, domain, cli string, dur uint64) {
|
|
|
|
u.nResult[res]++
|
|
|
|
if res == RNotFiltered {
|
|
|
|
u.domains[domain]++
|
|
|
|
} else {
|
|
|
|
u.blockedDomains[domain]++
|
|
|
|
}
|
|
|
|
|
|
|
|
u.clients[cli]++
|
|
|
|
u.timeSum += dur
|
|
|
|
u.nTotal++
|
|
|
|
}
|
|
|
|
|
|
|
|
// flushUnitToDB puts udb to the database at id.
|
|
|
|
func (udb *unitDB) flushUnitToDB(tx *bbolt.Tx, id uint32) (err error) {
|
|
|
|
log.Debug("stats: flushing unit with id %d and total of %d", id, udb.NTotal)
|
2019-08-22 16:34:58 +03:00
|
|
|
|
2021-08-27 14:50:37 +03:00
|
|
|
bkt, err := tx.CreateBucketIfNotExists(idToUnitName(id))
|
2019-08-22 16:34:58 +03:00
|
|
|
if err != nil {
|
2022-08-17 14:09:13 +03:00
|
|
|
return fmt.Errorf("creating bucket: %w", err)
|
2019-08-22 16:34:58 +03:00
|
|
|
}
|
|
|
|
|
2022-08-17 14:09:13 +03:00
|
|
|
buf := &bytes.Buffer{}
|
|
|
|
err = gob.NewEncoder(buf).Encode(udb)
|
2019-08-22 16:34:58 +03:00
|
|
|
if err != nil {
|
2022-08-17 14:09:13 +03:00
|
|
|
return fmt.Errorf("encoding unit: %w", err)
|
2019-08-22 16:34:58 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
err = bkt.Put([]byte{0}, buf.Bytes())
|
|
|
|
if err != nil {
|
2022-08-17 14:09:13 +03:00
|
|
|
return fmt.Errorf("putting unit to database: %w", err)
|
2019-08-22 16:34:58 +03:00
|
|
|
}
|
|
|
|
|
2022-08-17 14:09:13 +03:00
|
|
|
return nil
|
2019-08-22 16:34:58 +03:00
|
|
|
}
|
|
|
|
|
2022-08-04 19:05:28 +03:00
|
|
|
func convertTopSlice(a []countPair) (m []map[string]uint64) {
|
|
|
|
m = make([]map[string]uint64, 0, len(a))
|
2019-08-22 16:34:58 +03:00
|
|
|
for _, it := range a {
|
2022-08-04 19:05:28 +03:00
|
|
|
m = append(m, map[string]uint64{it.Name: it.Count})
|
2019-08-22 16:34:58 +03:00
|
|
|
}
|
2022-08-04 19:05:28 +03:00
|
|
|
|
2019-08-22 16:34:58 +03:00
|
|
|
return m
|
|
|
|
}
|
|
|
|
|
2021-02-09 19:38:31 +03:00
|
|
|
// numsGetter is a signature for statsCollector argument.  It extracts a
// single counter from the unit's data.
type numsGetter func(u *unitDB) (num uint64)
|
|
|
|
|
|
|
|
// statsCollector collects statisctics for the given *unitDB slice by specified
|
|
|
|
// timeUnit using ng to retrieve data.
|
|
|
|
func statsCollector(units []*unitDB, firstID uint32, timeUnit TimeUnit, ng numsGetter) (nums []uint64) {
|
|
|
|
if timeUnit == Hours {
|
2022-08-17 14:09:13 +03:00
|
|
|
nums = make([]uint64, 0, len(units))
|
2021-02-09 19:38:31 +03:00
|
|
|
for _, u := range units {
|
|
|
|
nums = append(nums, ng(u))
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// Per time unit counters: 720 hours may span 31 days, so we
|
|
|
|
// skip data for the first day in this case.
|
|
|
|
// align_ceil(24)
|
|
|
|
firstDayID := (firstID + 24 - 1) / 24 * 24
|
|
|
|
|
|
|
|
var sum uint64
|
|
|
|
id := firstDayID
|
|
|
|
nextDayID := firstDayID + 24
|
|
|
|
for i := int(firstDayID - firstID); i != len(units); i++ {
|
|
|
|
sum += ng(units[i])
|
|
|
|
if id == nextDayID {
|
|
|
|
nums = append(nums, sum)
|
|
|
|
sum = 0
|
|
|
|
nextDayID += 24
|
|
|
|
}
|
|
|
|
id++
|
|
|
|
}
|
|
|
|
if id <= nextDayID {
|
|
|
|
nums = append(nums, sum)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nums
|
|
|
|
}
|
|
|
|
|
|
|
|
// pairsGetter is a signature for topsCollector argument.  It extracts a slice
// of name-count pairs from the unit's data.
type pairsGetter func(u *unitDB) (pairs []countPair)
|
|
|
|
|
2022-08-04 19:05:28 +03:00
|
|
|
// topsCollector collects statistics about highest values from the given *unitDB
|
2021-02-09 19:38:31 +03:00
|
|
|
// slice using pg to retrieve data.
|
2023-02-13 18:15:33 +03:00
|
|
|
func topsCollector(units []*unitDB, max int, ignored *stringutil.Set, pg pairsGetter) []map[string]uint64 {
|
2021-02-09 19:38:31 +03:00
|
|
|
m := map[string]uint64{}
|
|
|
|
for _, u := range units {
|
2022-08-04 19:05:28 +03:00
|
|
|
for _, cp := range pg(u) {
|
2023-02-13 18:15:33 +03:00
|
|
|
if !ignored.Has(cp.Name) {
|
|
|
|
m[cp.Name] += cp.Count
|
|
|
|
}
|
2021-02-09 19:38:31 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
a2 := convertMapToSlice(m, max)
|
2022-08-17 14:09:13 +03:00
|
|
|
|
2021-02-09 19:38:31 +03:00
|
|
|
return convertTopSlice(a2)
|
|
|
|
}
|
|
|
|
|
2022-08-31 18:57:02 +03:00
|
|
|
// getData returns the statistics data using the following algorithm:
//
//  1. Prepare a slice of N units, where N is the value of "limit" configuration
//     setting.  Load data for the most recent units from the file.  If a unit
//     with required ID doesn't exist, just add an empty unit.  Get data for the
//     current unit.
//
//  2. Process data from the units and prepare an output map object, including
//     per time unit counters (DNS queries per time-unit, blocked queries per
//     time unit, etc.).  If the time unit is hour, just add values from each
//     unit to the slice; otherwise, the time unit is day, so aggregate per-hour
//     data into days.
//
//     To get the top counters (queries per domain, queries per blocked domain,
//     etc.), first sum up data for all units into a single map.  Then, get the
//     pairs with the highest numbers.
//
//     The total counters (DNS queries, blocked, etc.) are just the sum of data
//     for all units.
func (s *StatsCtx) getData(limit uint32) (StatsResp, bool) {
	// A zero limit means statistics are disabled; return an empty but
	// non-nil response.
	if limit == 0 {
		return StatsResp{
			TimeUnits: "days",

			TopBlocked: []topAddrs{},
			TopClients: []topAddrs{},
			TopQueried: []topAddrs{},

			BlockedFiltering:     []uint64{},
			DNSQueries:           []uint64{},
			ReplacedParental:     []uint64{},
			ReplacedSafebrowsing: []uint64{},
		}, true
	}

	// Use days as the time unit when more than a week of hourly units is
	// requested.
	timeUnit := Hours
	if limit/24 > 7 {
		timeUnit = Days
	}

	units, firstID := s.loadUnits(limit)
	if units == nil {
		return StatsResp{}, false
	}

	dnsQueries := statsCollector(units, firstID, timeUnit, func(u *unitDB) (num uint64) { return u.NTotal })
	// Invariant check: daily aggregation must produce exactly limit/24
	// buckets; see statsCollector.
	if timeUnit != Hours && len(dnsQueries) != int(limit/24) {
		log.Fatalf("len(dnsQueries) != limit: %d %d", len(dnsQueries), limit)
	}

	data := StatsResp{
		DNSQueries:           dnsQueries,
		BlockedFiltering:     statsCollector(units, firstID, timeUnit, func(u *unitDB) (num uint64) { return u.NResult[RFiltered] }),
		ReplacedSafebrowsing: statsCollector(units, firstID, timeUnit, func(u *unitDB) (num uint64) { return u.NResult[RSafeBrowsing] }),
		ReplacedParental:     statsCollector(units, firstID, timeUnit, func(u *unitDB) (num uint64) { return u.NResult[RParental] }),
		TopQueried:           topsCollector(units, maxDomains, s.ignored, func(u *unitDB) (pairs []countPair) { return u.Domains }),
		TopBlocked:           topsCollector(units, maxDomains, s.ignored, func(u *unitDB) (pairs []countPair) { return u.BlockedDomains }),
		// Clients are filtered via topClientPairs, so no ignored set here.
		TopClients: topsCollector(units, maxClients, nil, topClientPairs(s)),
	}

	// Total counters:
	sum := unitDB{
		NResult: make([]uint64, resultLast),
	}
	// timeN counts the units with a non-zero average processing time, so that
	// empty units don't skew the overall average below.
	timeN := 0
	for _, u := range units {
		sum.NTotal += u.NTotal
		sum.TimeAvg += u.TimeAvg
		if u.TimeAvg != 0 {
			timeN++
		}
		sum.NResult[RFiltered] += u.NResult[RFiltered]
		sum.NResult[RSafeBrowsing] += u.NResult[RSafeBrowsing]
		sum.NResult[RSafeSearch] += u.NResult[RSafeSearch]
		sum.NResult[RParental] += u.NResult[RParental]
	}

	data.NumDNSQueries = sum.NTotal
	data.NumBlockedFiltering = sum.NResult[RFiltered]
	data.NumReplacedSafebrowsing = sum.NResult[RSafeBrowsing]
	data.NumReplacedSafesearch = sum.NResult[RSafeSearch]
	data.NumReplacedParental = sum.NResult[RParental]

	// NOTE(review): this averages per-unit averages, which weighs all units
	// equally regardless of their request counts; also, the divisor 1000000
	// doesn't match TimeAvg's documented milliseconds — confirm the intended
	// unit of AvgProcessingTime.
	if timeN != 0 {
		data.AvgProcessingTime = float64(sum.TimeAvg/uint32(timeN)) / 1000000
	}

	data.TimeUnits = "hours"
	if timeUnit == Days {
		data.TimeUnits = "days"
	}

	return data, true
}
|
2023-04-14 15:25:04 +03:00
|
|
|
|
|
|
|
func topClientPairs(s *StatsCtx) (pg pairsGetter) {
|
|
|
|
return func(u *unitDB) (clients []countPair) {
|
|
|
|
for _, c := range u.Clients {
|
|
|
|
if c.Name != "" && !s.shouldCountClient([]string{c.Name}) {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
clients = append(clients, c)
|
|
|
|
}
|
|
|
|
|
|
|
|
return clients
|
|
|
|
}
|
|
|
|
}
|