Mirror of https://github.com/AdguardTeam/AdGuardHome.git, synced 2024-11-26 06:55:48 +03:00
Merge: -(dnsforward): fixing an issue with the querylog speed
Squashed commit of the following:

commit 7db062d2ce40369fc6c7eaa89f678c660d531516
Author: Andrey Meshkov <am@adguard.com>
Date:   Thu Feb 27 12:08:58 2020 +0300

    *(dnsforward): added comment about oldest

commit dbec84d8a8c60ca040faa59f8aa818ed5959e92a
Author: Simon Zolin <s.zolin@adguard.com>
Date:   Thu Feb 27 11:16:58 2020 +0300

    fix

commit d6df1ee28b3918e8d1676ff6ab4b516d46fe4202
Author: Andrey Meshkov <am@adguard.com>
Date:   Wed Feb 26 20:42:11 2020 +0300

    -(dnsforward): fixing an issue with the querylog speed
parent d839136fee
commit f84331abde

2 changed files with 22 additions and 8 deletions
@@ -231,6 +231,13 @@ func (l *queryLog) getData(params getDataParams) map[string]interface{} {
 		// remove extra records
 		entries = entries[(len(entries) - getDataLimit):]
 	}
+	if len(entries) == getDataLimit {
+		// change the "oldest" value here.
+		// we cannot use the "oldest" we got from "searchFiles" anymore
+		// because after adding in-memory records and removing extra records
+		// the situation has changed
+		oldest = entries[len(entries)-1].Time
+	}
 
 	// init the response object
 	var data = []map[string]interface{}{}
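Note: the hunk above recomputes the pagination cursor after the in-memory records have been merged in and the result has been trimmed to one page. A minimal, self-contained Go sketch of the same idea follows; the ordering, the slicing direction and the tiny getDataLimit are simplified assumptions for illustration, not the project's exact code.

package main

import (
	"fmt"
	"time"
)

type logEntry struct {
	Time time.Time
}

const getDataLimit = 3 // hypothetical; the real limit is larger

func main() {
	now := time.Now()

	// Records already flushed to disk, as a file search would return them,
	// together with the cursor it reports: the time of the last record read.
	fromFiles := []*logEntry{
		{Time: now.Add(-3 * time.Minute)},
		{Time: now.Add(-4 * time.Minute)},
	}
	oldest := fromFiles[len(fromFiles)-1].Time

	// Newer records that still live in the in-memory buffer are merged in,
	// newest first.
	entries := []*logEntry{
		{Time: now.Add(-1 * time.Minute)},
		{Time: now.Add(-2 * time.Minute)},
	}
	entries = append(entries, fromFiles...)

	// Trim to the page size, keeping the newest records.
	if len(entries) > getDataLimit {
		entries = entries[:getDataLimit]
	}

	// The cursor from the file search now points past the page that is
	// actually returned, so it has to be taken from the trimmed slice.
	if len(entries) == getDataLimit {
		oldest = entries[len(entries)-1].Time
	}

	fmt.Println("pagination cursor:", oldest.Format(time.RFC3339Nano))
}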
@@ -246,9 +253,6 @@ func (l *queryLog) getData(params getDataParams) map[string]interface{} {
 		len(entries), total, params.OlderThan, time.Since(now))
 
 	var result = map[string]interface{}{}
-	if len(entries) == getDataLimit {
-		oldest = entries[0].Time
-	}
 	result["oldest"] = ""
 	if !oldest.IsZero() {
 		result["oldest"] = oldest.Format(time.RFC3339Nano)
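Note: the removed block is superseded by the recomputation in the previous hunk; the surviving lines serialize the cursor as RFC3339Nano in result["oldest"]. A hedged client-side sketch of how that value could be fed back to request the next page follows; the older_than parameter name mirrors getDataParams.OlderThan, and the endpoint details are assumptions rather than a documented API.

package main

import (
	"fmt"
	"net/url"
	"time"
)

func main() {
	// Value copied from a previous response's result["oldest"].
	oldestStr := "2020-02-27T12:08:58.123456789+03:00"

	oldest, err := time.Parse(time.RFC3339Nano, oldestStr)
	if err != nil {
		fmt.Println("bad cursor:", err)
		return
	}

	// Ask for the next (older) page by passing the cursor back.
	q := url.Values{}
	q.Set("older_than", oldest.Format(time.RFC3339Nano))
	fmt.Println("next page query:", q.Encode())
}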
@@ -36,10 +36,16 @@ func (l *queryLog) searchFiles(params getDataParams) ([]*logEntry, time.Time, in
 		err = r.SeekStart()
 	} else {
 		err = r.Seek(params.OlderThan.UnixNano())
+		if err == nil {
+			// Read to the next record right away
+			// The one that was specified in the "oldest" param is not needed,
+			// we need only the one next to it
+			_, err = r.ReadNext()
+		}
 	}
 
 	if err != nil {
-		log.Error("Failed to Seek(): %v", err)
+		log.Debug("Cannot Seek() to %v: %v", params.OlderThan, err)
 		return entries, oldest, 0
 	}
 
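Note: the added "if err == nil" block is a seek-and-skip step: the record matching the older_than cursor has already been sent to the client, so it is read and discarded before the main loop starts. A simplified sketch of that pattern with a hypothetical in-memory reader (not the project's actual reader type) follows.

package main

import (
	"errors"
	"fmt"
)

type record struct {
	ts   int64
	text string
}

type reader struct {
	recs []record
	pos  int
}

// Seek positions the reader on the record with the given timestamp.
func (r *reader) Seek(ts int64) error {
	for i, rec := range r.recs {
		if rec.ts == ts {
			r.pos = i
			return nil
		}
	}
	return errors.New("timestamp not found")
}

// ReadNext returns the current record and advances the position.
func (r *reader) ReadNext() (record, error) {
	if r.pos >= len(r.recs) {
		return record{}, errors.New("EOF")
	}
	rec := r.recs[r.pos]
	r.pos++
	return rec, nil
}

func main() {
	r := &reader{recs: []record{
		{3, "newest"}, {2, "middle"}, {1, "oldest"},
	}}

	olderThan := int64(3) // cursor: the client already has record 3

	if err := r.Seek(olderThan); err == nil {
		// Discard the anchor record itself; only the ones after it are needed.
		_, _ = r.ReadNext()
	}

	for {
		rec, err := r.ReadNext()
		if err != nil {
			break
		}
		fmt.Println("return to client:", rec.text)
	}
}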
@@ -54,12 +60,16 @@ func (l *queryLog) searchFiles(params getDataParams) ([]*logEntry, time.Time, in
 			break
 		}
 
-		if entry != nil {
-			entries = append(entries, entry)
-		}
-
 		oldestNano = ts
 		total++
+
+		if entry != nil {
+			entries = append(entries, entry)
+			if len(entries) == getDataLimit {
+				// Do not read more than "getDataLimit" records at once
+				break
+			}
+		}
 	}
 
 	oldest = time.Unix(0, oldestNano)
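Note: after this hunk the loop still advances the cursor and the total counter for every scanned record, but collects at most getDataLimit entries and stops reading as soon as the page is full. A condensed, self-contained sketch of that loop shape follows; the decode helper, the record layout and the limit value are illustrative assumptions.

package main

import "fmt"

const getDataLimit = 2 // hypothetical; the real limit is larger

type logEntry struct {
	ts     int64
	client string
}

// decode stands in for reading and filtering one record from the log file;
// it returns nil when the record does not match the search criteria.
func decode(rec logEntry, wantClient string) *logEntry {
	if rec.client != wantClient {
		return nil
	}
	r := rec
	return &r
}

func main() {
	file := []logEntry{
		{ts: 50, client: "a"}, {ts: 40, client: "b"}, {ts: 30, client: "a"},
		{ts: 20, client: "a"}, {ts: 10, client: "b"},
	}

	var entries []*logEntry
	var oldestNano int64
	total := 0

	for _, rec := range file {
		entry := decode(rec, "a")

		// Every scanned record moves the cursor and is counted,
		// matching the loop in the hunk above.
		oldestNano = rec.ts
		total++

		if entry != nil {
			entries = append(entries, entry)
			if len(entries) == getDataLimit {
				// Do not read more than "getDataLimit" records at once.
				break
			}
		}
	}

	fmt.Printf("collected %d entries, scanned %d records, oldestNano=%d\n",
		len(entries), total, oldestNano)
}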