From a63fe958aeae690dba4754cd9ecedd449f2770b9 Mon Sep 17 00:00:00 2001
From: Eugene Bujak <hmage@hmage.net>
Date: Sun, 7 Oct 2018 02:17:22 +0300
Subject: [PATCH] Querylog -- Read from querylog files when answering the
 /querylog API, so the log now survives restarts.

---
 coredns_plugin/querylog.go      |  5 ++
 coredns_plugin/querylog_file.go | 87 +++++++++++++++++++++++++++++++++
 2 files changed, 92 insertions(+)

diff --git a/coredns_plugin/querylog.go b/coredns_plugin/querylog.go
index eb848fb8..381ac16a 100644
--- a/coredns_plugin/querylog.go
+++ b/coredns_plugin/querylog.go
@@ -88,6 +88,11 @@ func handleQueryLog(w http.ResponseWriter, r *http.Request) {
 	logBufferLock.RLock()
 	values := logBuffer
 	logBufferLock.RUnlock()
+
+	if len(values) < queryLogAPI {
+		values = appendFromLogFile(values, queryLogAPI, time.Hour*24)
+	}
+
 	var data = []map[string]interface{}{}
 	for _, entry := range values {
 		var q *dns.Msg
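
Note on the handler change above: it copies the slice header out of logBuffer while holding the read lock, releases the lock, and only then falls back to the (potentially slow) gzipped files on disk, so disk reads never run under the lock. Below is a minimal, self-contained sketch of that snapshot-then-release pattern; it reuses the names from the hunk above but with a pared-down logEntry, and is only an illustration, not the plugin's code.

package main

import (
	"fmt"
	"sync"
	"time"
)

type logEntry struct {
	Time time.Time
}

var (
	logBufferLock sync.RWMutex
	logBuffer     []logEntry
)

// snapshotBuffer copies the slice header under the read lock and releases the
// lock immediately; the caller can then do slow work (such as reading gzipped
// log files) without blocking goroutines that append new entries.
func snapshotBuffer() []logEntry {
	logBufferLock.RLock()
	values := logBuffer
	logBufferLock.RUnlock()
	return values
}

func main() {
	logBufferLock.Lock()
	logBuffer = append(logBuffer, logEntry{Time: time.Now()})
	logBufferLock.Unlock()

	fmt.Printf("snapshot holds %d entries\n", len(snapshotBuffer()))
}

The snapshot's length is fixed at copy time, so entries appended afterwards (whether or not append reallocates the backing array) are simply not visible to the request being served, which is why a plain header copy is enough here.
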
diff --git a/coredns_plugin/querylog_file.go b/coredns_plugin/querylog_file.go
index 1cfa93a7..412817ed 100644
--- a/coredns_plugin/querylog_file.go
+++ b/coredns_plugin/querylog_file.go
@@ -144,3 +144,90 @@ func periodicQueryLogRotate(t time.Duration) {
 		}
 	}
 }
+
+func appendFromLogFile(values []logEntry, maxLen int, timeWindow time.Duration) []logEntry {
+	now := time.Now()
+	// read from querylog files, try newest file first
+	files := []string{
+		queryLogFileName + ".gz",
+		queryLogFileName + ".gz.1",
+	}
+
+	a := []logEntry{}
+
+	// read from all files
+	for _, file := range files {
+		if len(a) >= maxLen {
+			// previous file filled us with enough fresh entries
+			break
+		}
+		if _, err := os.Stat(file); os.IsNotExist(err) {
+			// do nothing, file doesn't exist
+			continue
+		}
+
+		trace("Opening file %s", file)
+		f, err := os.Open(file)
+		if err != nil {
+			log.Printf("Failed to open file \"%s\": %s", file, err)
+			// try next file
+			continue
+		}
+		defer f.Close()
+
+		trace("Creating gzip reader")
+		zr, err := gzip.NewReader(f)
+		if err != nil {
+			log.Printf("Failed to create gzip reader: %s", err)
+			continue
+		}
+
+		trace("Creating json decoder")
+		d := json.NewDecoder(zr)
+
+		i := 0
+		// entries in the file are in oldest->newest order;
+		// we want the newest maxLen of them
+		for d.More() {
+			var entry logEntry
+			err := d.Decode(&entry)
+			if err != nil {
+				log.Printf("Failed to decode: %s", err)
+				// the next entry may still decode fine, keep going
+				continue
+			}
+
+			if now.Sub(entry.Time) > timeWindow {
+				trace("skipping entry")
+				continue
+			}
+
+			i++
+			a = append(a, entry)
+			if len(a) > maxLen {
+				toskip := len(a) - maxLen
+				a = a[toskip:]
+			}
+		}
+		err = zr.Close()
+		if err != nil {
+			log.Printf("Encountered error while closing gzip reader: %s", err)
+		}
+		log.Printf("file \"%s\": read %d entries", file, i)
+	}
+
+	// now that we've read all eligible entries, reverse the slice to make it go from newest->oldest
+	for left, right := 0, len(a)-1; left < right; left, right = left+1, right-1 {
+		a[left], a[right] = a[right], a[left]
+	}
+
+	// append it to values
+	values = append(values, a...)
+
+	// then cut it off if it is bigger than maxLen
+	if len(values) > maxLen {
+		values = values[:maxLen]
+	}
+
+	return values
+}
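
For reference, the following is a minimal, self-contained sketch of the on-disk format this reader implies (a gzip-compressed stream of JSON-encoded entries) together with the same streaming decode loop: time-window filtering plus trimming to the newest maxLen. The entry type and file name are simplified stand-ins for the plugin's logEntry and queryLogFileName, and the writer half is only an assumption based on what the json.Decoder loop above expects.

package main

import (
	"compress/gzip"
	"encoding/json"
	"fmt"
	"log"
	"os"
	"time"
)

// entry is a pared-down stand-in for the plugin's logEntry; only Time is
// needed for the window check.
type entry struct {
	Time     time.Time
	Question string
}

// writeDemoLog emits one JSON value per entry, oldest->newest, all inside a
// single gzip stream -- the layout the decode loop expects.
func writeDemoLog(name string) error {
	f, err := os.Create(name)
	if err != nil {
		return err
	}
	defer f.Close()
	zw := gzip.NewWriter(f)
	defer zw.Close()
	enc := json.NewEncoder(zw)
	for i := 6; i >= 1; i-- {
		e := entry{
			Time:     time.Now().Add(-time.Duration(i) * time.Hour),
			Question: fmt.Sprintf("host%d.example.org", i),
		}
		if err := enc.Encode(e); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	const name = "querylog_demo.json.gz"
	if err := writeDemoLog(name); err != nil {
		log.Fatal(err)
	}

	const maxLen = 2
	const window = 4 * time.Hour
	now := time.Now()

	f, err := os.Open(name)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	zr, err := gzip.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}
	defer zr.Close()

	var kept []entry
	d := json.NewDecoder(zr)
	for d.More() {
		var e entry
		if err := d.Decode(&e); err != nil {
			log.Printf("skipping undecodable entry: %s", err)
			continue
		}
		if now.Sub(e.Time) > window {
			continue // outside the time window
		}
		kept = append(kept, e)
		if len(kept) > maxLen {
			// the file is ordered oldest->newest, so drop from the front
			kept = kept[len(kept)-maxLen:]
		}
	}
	for _, e := range kept {
		fmt.Println(e.Time.Format(time.RFC3339), e.Question)
	}
}

Running it prints the newest in-window entries in oldest->newest order; appendFromLogFile additionally reverses its result to newest->oldest before merging it into the API response.
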