2019-08-26 11:54:38 +03:00
|
|
|
package querylog
|
2018-12-05 14:03:41 +03:00
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
2019-08-26 11:54:38 +03:00
|
|
|
"os"
|
2019-02-10 20:47:43 +03:00
|
|
|
"path/filepath"
|
2018-12-05 14:03:41 +03:00
|
|
|
"strconv"
|
|
|
|
"strings"
|
|
|
|
"sync"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/AdguardTeam/AdGuardHome/dnsfilter"
|
2019-02-25 16:44:22 +03:00
|
|
|
"github.com/AdguardTeam/golibs/log"
|
2018-12-05 14:03:41 +03:00
|
|
|
"github.com/miekg/dns"
|
|
|
|
)
|
|
|
|
|
|
|
|
const (
	queryLogFileName = "querylog.json" // .gz added during compression

	// getDataLimit is the maximum number of log entries that getData()
	// returns to the caller in one response.
	getDataLimit = 500 // GetData(): maximum log entries to return

	// maximum entries to parse when searching
	maxSearchEntries = 50000
)
|
|
|
|
|
2019-02-10 20:47:43 +03:00
|
|
|
// queryLog is a structure that writes and reads the DNS query log
type queryLog struct {
	// conf is a private copy of the configuration passed to newQueryLog;
	// mutating the caller's Config does not affect this instance.
	conf *Config

	// lock -- NOTE(review): not acquired anywhere in this chunk;
	// presumably guards state in code elsewhere in the package -- confirm.
	lock sync.Mutex

	logFile string // path to the log file

	// bufferLock protects buffer and flushPending (see Add / clear / getData).
	bufferLock sync.RWMutex
	// buffer accumulates log entries in memory until they are flushed to disk.
	buffer []*logEntry

	fileFlushLock sync.Mutex // synchronize a file-flushing goroutine and main thread

	flushPending bool // don't start another goroutine while the previous one is still running

	// fileWriteLock -- NOTE(review): the file writer is outside this chunk;
	// presumably serializes writes to logFile -- confirm.
	fileWriteLock sync.Mutex
}
|
|
|
|
|
2019-09-04 14:12:00 +03:00
|
|
|
// create a new instance of the query log
|
2019-08-26 11:54:38 +03:00
|
|
|
func newQueryLog(conf Config) *queryLog {
|
|
|
|
l := queryLog{}
|
|
|
|
l.logFile = filepath.Join(conf.BaseDir, queryLogFileName)
|
2019-11-12 15:36:17 +03:00
|
|
|
l.conf = &Config{}
|
|
|
|
*l.conf = conf
|
2019-09-27 18:58:57 +03:00
|
|
|
if !checkInterval(l.conf.Interval) {
|
|
|
|
l.conf.Interval = 1
|
|
|
|
}
|
2020-01-16 14:25:40 +03:00
|
|
|
return &l
|
|
|
|
}
|
|
|
|
|
|
|
|
// Start registers the HTTP handlers (when an HTTP registrar is configured)
// and launches the background log-rotation goroutine.
func (l *queryLog) Start() {
	if l.conf.HTTPRegister != nil {
		l.initWeb()
	}
	// periodicRotate runs for the lifetime of the process; it is defined
	// elsewhere in this package.
	go l.periodicRotate()
}
|
|
|
|
|
|
|
|
// Close flushes the in-memory buffer to disk.
func (l *queryLog) Close() {
	// The flush error is deliberately discarded -- we are shutting down and
	// there is nothing useful to do with it here.
	_ = l.flushLogBuffer(true)
}
|
|
|
|
|
2019-09-27 18:58:57 +03:00
|
|
|
// checkInterval reports whether days is one of the supported query log
// rotation intervals: 1, 7, 30 or 90 days.
func checkInterval(days uint32) bool {
	switch days {
	case 1, 7, 30, 90:
		return true
	default:
		return false
	}
}
|
|
|
|
|
|
|
|
// WriteDiskConfig copies the current Enabled and Interval settings into dc.
func (l *queryLog) WriteDiskConfig(dc *DiskConfig) {
	dc.Enabled = l.conf.Enabled
	dc.Interval = l.conf.Interval
}
|
|
|
|
|
|
|
|
// Clear memory buffer and remove log files
func (l *queryLog) clear() {
	// Hold fileFlushLock for the whole operation so a concurrent flush
	// goroutine cannot re-create the files we are about to remove.
	l.fileFlushLock.Lock()
	defer l.fileFlushLock.Unlock()

	// Drop the in-memory buffer and reset the pending-flush flag under
	// bufferLock, consistent with Add().
	l.bufferLock.Lock()
	l.buffer = nil
	l.flushPending = false
	l.bufferLock.Unlock()

	// Remove the rotated file first, then the current one.  A missing file
	// is not an error -- the log may simply never have been written.
	err := os.Remove(l.logFile + ".1")
	if err != nil && !os.IsNotExist(err) {
		log.Error("file remove: %s: %s", l.logFile+".1", err)
	}

	err = os.Remove(l.logFile)
	if err != nil && !os.IsNotExist(err) {
		log.Error("file remove: %s: %s", l.logFile, err)
	}

	log.Debug("Query log: cleared")
}
|
2018-12-05 14:03:41 +03:00
|
|
|
|
|
|
|
// logEntry is a single query log record in the form it is serialized to JSON.
type logEntry struct {
	IP   string    `json:"IP"` // client IP address as a string
	Time time.Time `json:"T"`  // when the entry was created

	QHost  string `json:"QH"` // question host, lowercased, without the trailing dot
	QType  string `json:"QT"` // question type as text (e.g. "A")
	QClass string `json:"QC"` // question class as text (e.g. "IN")

	// Answer is the response message in packed (wire) format.
	Answer []byte `json:",omitempty"` // sometimes empty answers happen like binerdunt.top or rev2.globalrootservers.net
	// OrigAnswer is the packed form of the original answer
	// (params.OrigAnswer in Add).
	OrigAnswer []byte `json:",omitempty"`

	// Result is the filtering result for this query.
	Result dnsfilter.Result
	// Elapsed is the query processing duration.
	Elapsed  time.Duration
	Upstream string `json:",omitempty"` // if empty, means it was cached
}
|
|
|
|
|
2019-11-21 16:13:19 +03:00
|
|
|
// Add stores a new entry in the in-memory buffer and, when the buffer
// reaches the configured size, starts an asynchronous flush to disk.
//
// The entry is silently dropped when logging is disabled or when the
// request is malformed (no question, empty name, or no client IP).
// NOTE(review): a Pack() failure also drops the whole entry -- confirm
// this is intended rather than logging the entry without the answer.
func (l *queryLog) Add(params AddParams) {
	if !l.conf.Enabled {
		return
	}

	// Require exactly one question with a non-empty name and a client IP.
	if params.Question == nil || len(params.Question.Question) != 1 || len(params.Question.Question[0].Name) == 0 ||
		params.ClientIP == nil {
		return
	}

	// Normalize a nil result to an empty one so the entry always carries a
	// valid dnsfilter.Result value.
	if params.Result == nil {
		params.Result = &dnsfilter.Result{}
	}

	now := time.Now()
	entry := logEntry{
		IP:   params.ClientIP.String(),
		Time: now,

		Result:   *params.Result,
		Elapsed:  params.Elapsed,
		Upstream: params.Upstream,
	}
	q := params.Question.Question[0]
	entry.QHost = strings.ToLower(q.Name[:len(q.Name)-1]) // remove the last dot
	entry.QType = dns.Type(q.Qtype).String()
	entry.QClass = dns.Class(q.Qclass).String()

	// Store the answer in packed (wire) format.
	if params.Answer != nil {
		a, err := params.Answer.Pack()
		if err != nil {
			log.Info("Querylog: Answer.Pack(): %s", err)
			return
		}
		entry.Answer = a
	}

	// Same for the original (unmodified) answer, when present.
	if params.OrigAnswer != nil {
		a, err := params.OrigAnswer.Pack()
		if err != nil {
			log.Info("Querylog: OrigAnswer.Pack(): %s", err)
			return
		}
		entry.OrigAnswer = a
	}

	// Append under bufferLock; decide about flushing while still holding
	// the lock so only one goroutine sets flushPending.
	l.bufferLock.Lock()
	l.buffer = append(l.buffer, &entry)
	needFlush := false
	if !l.flushPending {
		needFlush = len(l.buffer) >= int(l.conf.MemSize)
		if needFlush {
			l.flushPending = true
		}
	}
	l.bufferLock.Unlock()

	// if buffer needs to be flushed to disk, do it now
	if needFlush {
		// write to file
		// do it in separate goroutine -- we are stalling DNS response this whole time
		go l.flushLogBuffer(false) // nolint
	}
}
|
|
|
|
|
2019-09-27 18:58:57 +03:00
|
|
|
// getDataParams is the set of search and pagination parameters accepted by
// getData().
type getDataParams struct {
	OlderThan         time.Time          // return entries that are older than this value
	Domain            string             // filter by domain name in question
	Client            string             // filter by client IP
	QuestionType      string             // filter by question type
	ResponseStatus    responseStatusType // filter by response status
	StrictMatchDomain bool               // if Domain value must be matched strictly
	StrictMatchClient bool               // if Client value must be matched strictly
}
|
|
|
|
|
|
|
|
// responseStatusType is the response-status filter value used in
// getDataParams.ResponseStatus.
type responseStatusType int32

// Response status constants.  Values start at 1 so the zero value is
// distinguishable from an explicit choice.
const (
	responseStatusAll responseStatusType = iota + 1
	responseStatusFiltered
)
|
|
|
|
|
2020-02-21 01:07:30 +03:00
|
|
|
// getData returns up to getDataLimit log entries matching params, merged
// from the on-disk files and the in-memory buffer, as a JSON-ready map with
// keys "data" (the entries, newest first) and "oldest" (RFC3339Nano
// timestamp of the oldest returned entry, or "" when unknown).
func (l *queryLog) getData(params getDataParams) map[string]interface{} {
	now := time.Now()

	// add from file
	// searchFiles and matchesGetDataParams are defined elsewhere in this
	// package.
	fileEntries, oldest, total := l.searchFiles(params)

	// An unset OlderThan means "everything up to now".
	if params.OlderThan.IsZero() {
		params.OlderThan = now
	}

	// add from memory buffer
	// NOTE(review): this section only reads l.buffer, so bufferLock.RLock()
	// would suffice -- confirm before changing.
	l.bufferLock.Lock()
	total += len(l.buffer)
	memoryEntries := make([]*logEntry, 0)
	for _, entry := range l.buffer {
		if !matchesGetDataParams(entry, params) {
			continue
		}

		// The buffer is in chronological order, so once we reach
		// OlderThan we can stop scanning.
		if entry.Time.UnixNano() >= params.OlderThan.UnixNano() {
			break
		}

		memoryEntries = append(memoryEntries, entry)
	}
	l.bufferLock.Unlock()

	// now let's get a unified collection
	entries := append(memoryEntries, fileEntries...)
	if len(entries) > getDataLimit {
		// remove extra records
		entries = entries[(len(entries) - getDataLimit):]
	}

	// init the response object
	var data = []map[string]interface{}{}

	// the elements order is already reversed (from newer to older)
	for i := 0; i < len(entries); i++ {
		entry := entries[i]
		jsonEntry := logEntryToJSONEntry(entry)
		data = append(data, jsonEntry)
	}

	log.Debug("QueryLog: prepared data (%d/%d) older than %s in %s",
		len(entries), total, params.OlderThan, time.Since(now))

	var result = map[string]interface{}{}
	// When the page is full, the pagination cursor is the oldest entry we
	// actually returned rather than the oldest entry scanned from disk.
	if len(entries) == getDataLimit {
		oldest = entries[0].Time
	}
	result["oldest"] = ""
	if !oldest.IsZero() {
		result["oldest"] = oldest.Format(time.RFC3339Nano)
	}
	result["data"] = data
	return result
}
|
2018-12-24 23:06:36 +03:00
|
|
|
|
2020-02-21 01:07:30 +03:00
|
|
|
// logEntryToJSONEntry converts a logEntry into the map form served by the
// query log API.  Unpack failures of the stored answers are logged and the
// corresponding fields are simply omitted from the result.
func logEntryToJSONEntry(entry *logEntry) map[string]interface{} {
	var msg *dns.Msg

	// Unpack the stored wire-format answer, if any.
	if len(entry.Answer) > 0 {
		msg = new(dns.Msg)
		if err := msg.Unpack(entry.Answer); err != nil {
			log.Debug("Failed to unpack dns message answer: %s: %s", err, string(entry.Answer))
			msg = nil
		}
	}

	jsonEntry := map[string]interface{}{
		"reason":    entry.Result.Reason.String(),
		"elapsedMs": strconv.FormatFloat(entry.Elapsed.Seconds()*1000, 'f', -1, 64),
		"time":      entry.Time.Format(time.RFC3339Nano),
		"client":    entry.IP,
	}
	jsonEntry["question"] = map[string]interface{}{
		"host":  entry.QHost,
		"type":  entry.QType,
		"class": entry.QClass,
	}

	// Response code is only available when the answer unpacked cleanly.
	if msg != nil {
		jsonEntry["status"] = dns.RcodeToString[msg.Rcode]
	}
	// Filtering rule details, present only when a rule matched.
	if len(entry.Result.Rule) > 0 {
		jsonEntry["rule"] = entry.Result.Rule
		jsonEntry["filterId"] = entry.Result.FilterID
	}

	if len(entry.Result.ServiceName) != 0 {
		jsonEntry["service_name"] = entry.Result.ServiceName
	}

	answers := answerToMap(msg)
	if answers != nil {
		jsonEntry["answer"] = answers
	}

	// Attach the original answer (see logEntry.OrigAnswer), when present.
	if len(entry.OrigAnswer) != 0 {
		a := new(dns.Msg)
		err := a.Unpack(entry.OrigAnswer)
		if err == nil {
			answers = answerToMap(a)
			if answers != nil {
				jsonEntry["original_answer"] = answers
			}
		} else {
			log.Debug("Querylog: msg.Unpack(entry.OrigAnswer): %s: %s", err, string(entry.OrigAnswer))
		}
	}

	return jsonEntry
}
|
|
|
|
|
2019-01-24 20:11:01 +03:00
|
|
|
// answerToMap converts the answer section of a DNS message into a slice of
// maps with keys "type", "ttl" and "value".  It returns nil for a nil
// message or an empty answer section.
func answerToMap(a *dns.Msg) []map[string]interface{} {
	if a == nil || len(a.Answer) == 0 {
		return nil
	}

	var answers = []map[string]interface{}{}
	for _, k := range a.Answer {
		header := k.Header()
		answer := map[string]interface{}{
			"type": dns.TypeToString[header.Rrtype],
			"ttl":  header.Ttl,
		}
		// try most common record types
		switch v := k.(type) {
		case *dns.A:
			answer["value"] = v.A.String()
		case *dns.AAAA:
			answer["value"] = v.AAAA.String()
		case *dns.MX:
			answer["value"] = fmt.Sprintf("%v %v", v.Preference, v.Mx)
		case *dns.CNAME:
			answer["value"] = v.Target
		case *dns.NS:
			answer["value"] = v.Ns
		case *dns.SPF:
			// NOTE(review): SPF and TXT store []string here, unlike the
			// other cases which store a single string -- confirm consumers
			// handle that.
			answer["value"] = v.Txt
		case *dns.TXT:
			answer["value"] = v.Txt
		case *dns.PTR:
			answer["value"] = v.Ptr
		case *dns.SOA:
			answer["value"] = fmt.Sprintf("%v %v %v %v %v %v %v", v.Ns, v.Mbox, v.Serial, v.Refresh, v.Retry, v.Expire, v.Minttl)
		case *dns.CAA:
			answer["value"] = fmt.Sprintf("%v %v \"%v\"", v.Flag, v.Tag, v.Value)
		case *dns.HINFO:
			answer["value"] = fmt.Sprintf("\"%v\" \"%v\"", v.Cpu, v.Os)
		case *dns.RRSIG:
			answer["value"] = fmt.Sprintf("%v %v %v %v %v %v %v %v %v", dns.TypeToString[v.TypeCovered], v.Algorithm, v.Labels, v.OrigTtl, v.Expiration, v.Inception, v.KeyTag, v.SignerName, v.Signature)
		default:
			// type unknown, marshall it as-is
			answer["value"] = v
		}
		answers = append(answers, answer)
	}

	return answers
}
|