2018-08-30 17:25:33 +03:00
|
|
|
package dnsfilter
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bufio"
|
|
|
|
"bytes"
|
2019-04-18 14:31:13 +03:00
|
|
|
"context"
|
2018-08-30 17:25:33 +03:00
|
|
|
"crypto/sha256"
|
|
|
|
"encoding/json"
|
|
|
|
"errors"
|
|
|
|
"fmt"
|
|
|
|
"io/ioutil"
|
2018-10-29 15:46:58 +03:00
|
|
|
"net"
|
2018-08-30 17:25:33 +03:00
|
|
|
"net/http"
|
|
|
|
"strings"
|
|
|
|
"sync/atomic"
|
|
|
|
"time"
|
|
|
|
|
2019-06-18 16:18:13 +03:00
|
|
|
"github.com/joomcode/errorx"
|
|
|
|
|
2019-04-18 14:31:13 +03:00
|
|
|
"github.com/AdguardTeam/dnsproxy/upstream"
|
2019-02-25 16:44:22 +03:00
|
|
|
"github.com/AdguardTeam/golibs/log"
|
2019-05-15 16:46:11 +03:00
|
|
|
"github.com/AdguardTeam/urlfilter"
|
2019-02-28 13:01:41 +03:00
|
|
|
"github.com/bluele/gcache"
|
2019-05-22 12:38:17 +03:00
|
|
|
"github.com/miekg/dns"
|
2018-08-30 17:25:33 +03:00
|
|
|
"golang.org/x/net/publicsuffix"
|
|
|
|
)
|
|
|
|
|
|
|
|
// Cache defaults for safebrowsing/parental/safesearch lookup results.
const (
	defaultCacheSize = 64 * 1024        // in number of elements
	defaultCacheTime = 30 * time.Minute // entry expiration
)

// HTTP client defaults for lookups to the safebrowsing and parental servers.
const (
	defaultHTTPTimeout            = 5 * time.Minute
	defaultHTTPMaxIdleConnections = 100
)

// Default remote services and URL templates used by the lookup code.
const (
	defaultSafebrowsingServer = "sb.adtidy.org"
	defaultSafebrowsingURL    = "%s://%s/safebrowsing-lookup-hash.html?prefixes=%s"

	defaultParentalServer      = "pctrl.adguard.com"
	defaultParentalURL         = "%s://%s/check-parental-control-hash?prefixes=%s&sensitivity=%d"
	defaultParentalSensitivity = 13 // use "TEEN" by default
)

const maxDialCacheSize = 2 // the number of host names for safebrowsing and parental control
|
2018-08-30 17:25:33 +03:00
|
|
|
|
2019-05-28 14:14:12 +03:00
|
|
|
// RequestFilteringSettings holds per-request filtering toggles.
// An instance is built by CheckHost from the global Config and may then be
// overridden per client via Config.FilterHandler.
type RequestFilteringSettings struct {
	FilteringEnabled    bool // check host against filter lists
	SafeSearchEnabled   bool // rewrite search engines to their safe-search variants
	SafeBrowsingEnabled bool // check host against the safebrowsing service
	ParentalEnabled     bool // check host against the parental-control service
}
|
|
|
|
|
2018-11-30 13:32:51 +03:00
|
|
|
// Config allows you to configure DNS filtering with New() or just change variables directly.
type Config struct {
	ParentalSensitivity int  `yaml:"parental_sensitivity"` // must be either 3, 10, 13 or 17
	ParentalEnabled     bool `yaml:"parental_enabled"`
	UsePlainHTTP        bool `yaml:"-"` // use plain HTTP for requests to parental and safe browsing servers
	SafeSearchEnabled   bool `yaml:"safesearch_enabled"`
	SafeBrowsingEnabled bool `yaml:"safebrowsing_enabled"`
	ResolverAddress     string // DNS server address used to resolve the lookup servers' own host names

	// Filtering callback function: lets the host application override the
	// per-request settings based on the client address.  May be nil.
	FilterHandler func(clientAddr string, settings *RequestFilteringSettings) `yaml:"-"`
}
|
|
|
|
|
|
|
|
// privateConfig holds lookup-server host names that must be changed only
// through the Dnsfilter methods (e.g. SetSafeBrowsingServer).
type privateConfig struct {
	parentalServer     string // access via methods
	safeBrowsingServer string // access via methods
}
|
|
|
|
|
2018-09-14 16:50:56 +03:00
|
|
|
// LookupStats store stats collected during safebrowsing or parental checks.
// All fields are updated with sync/atomic and must be read accordingly.
type LookupStats struct {
	Requests   uint64 // number of HTTP requests that were sent
	CacheHits  uint64 // number of lookups that didn't need HTTP requests
	Pending    int64  // number of currently pending HTTP requests
	PendingMax int64  // maximum number of pending HTTP requests
}
|
|
|
|
|
2019-02-22 16:34:36 +03:00
|
|
|
// Stats store LookupStats for safebrowsing, parental and safesearch.
type Stats struct {
	Safebrowsing LookupStats
	Parental     LookupStats
	Safesearch   LookupStats
}
|
|
|
|
|
|
|
|
// Dnsfilter holds added rules and performs hostname matches against the rules
type Dnsfilter struct {
	rulesStorage    *urlfilter.RuleStorage // backing storage for the filtering engine; closed in Destroy()
	filteringEngine *urlfilter.DNSEngine   // compiled rule matcher; nil if initFiltering was never run

	// HTTP lookups for safebrowsing and parental
	client    http.Client     // handle for http client -- single instance as recommended by docs
	transport *http.Transport // handle for http transport used by http client

	Config // for direct access by library users, even a = assignment
	privateConfig
}
|
|
|
|
|
2019-01-24 20:11:01 +03:00
|
|
|
// Filter represents a filter list
type Filter struct {
	ID       int64  `json:"id"`                // auto-assigned when filter is added (see nextFilterID), json by default keeps ID uppercase but we need lowercase
	Data     []byte `json:"-" yaml:"-"`        // List of rules divided by '\n'
	FilePath string `json:"-" yaml:"-"`        // Path to a filtering rules file
}
|
|
|
|
|
2018-08-30 17:25:33 +03:00
|
|
|
//go:generate stringer -type=Reason

// Reason holds an enum detailing why it was filtered or not filtered
type Reason int
|
|
|
|
|
|
|
|
const (
	// reasons for not filtering

	// NotFilteredNotFound - host was not find in any checks, default value for result
	NotFilteredNotFound Reason = iota
	// NotFilteredWhiteList - the host is explicitly whitelisted
	NotFilteredWhiteList
	// NotFilteredError - there was a transitive error during check
	NotFilteredError

	// reasons for filtering
	// NOTE: the iota order is part of the stringer output and of any
	// serialized Result values -- do not reorder.

	// FilteredBlackList - the host was matched to be advertising host
	FilteredBlackList
	// FilteredSafeBrowsing - the host was matched to be malicious/phishing
	FilteredSafeBrowsing
	// FilteredParental - the host was matched to be outside of parental control settings
	FilteredParental
	// FilteredInvalid - the request was invalid and was not processed
	FilteredInvalid
	// FilteredSafeSearch - the host was replaced with safesearch variant
	FilteredSafeSearch
)
|
|
|
|
|
2019-06-27 10:48:12 +03:00
|
|
|
// dnsFilterContext groups the package-level caches and counters shared by
// every Dnsfilter instance.  Caches are created lazily in New().
type dnsFilterContext struct {
	stats             Stats
	dialCache         gcache.Cache // "host" -> "IP" cache for safebrowsing and parental control servers
	safebrowsingCache gcache.Cache // "host" -> Result cache for safebrowsing lookups
	parentalCache     gcache.Cache // "host" -> Result cache for parental lookups
	safeSearchCache   gcache.Cache // "host" -> Result cache for safesearch lookups
}

var gctx dnsFilterContext // global dnsfilter context
|
2018-08-30 17:25:33 +03:00
|
|
|
|
2018-09-14 16:50:56 +03:00
|
|
|
// Result holds state of hostname check
type Result struct {
	IsFiltered bool   `json:",omitempty"` // True if the host name is filtered
	Reason     Reason `json:",omitempty"` // Reason for blocking / unblocking
	Rule       string `json:",omitempty"` // Original rule text
	IP         net.IP `json:",omitempty"` // Not nil only in the case of a hosts file syntax
	FilterID   int64  `json:",omitempty"` // Filter ID the rule belongs to
}
|
|
|
|
|
2018-09-14 16:50:56 +03:00
|
|
|
// Matched can be used to see if any match at all was found, no matter filtered or not
|
2018-08-30 17:25:33 +03:00
|
|
|
func (r Reason) Matched() bool {
|
|
|
|
return r != NotFilteredNotFound
|
|
|
|
}
|
|
|
|
|
|
|
|
// CheckHost tries to match host against rules, then safebrowsing and parental if they are enabled.
// Checks run in a fixed priority order: filter lists, safesearch, safebrowsing,
// parental; the first match wins.  Safesearch/safebrowsing/parental lookup
// failures are logged and treated as "no match" rather than returned as errors.
func (d *Dnsfilter) CheckHost(host string, qtype uint16, clientAddr string) (Result, error) {
	// sometimes DNS clients will try to resolve ".", which is a request to get root servers
	if host == "" {
		return Result{Reason: NotFilteredNotFound}, nil
	}
	host = strings.ToLower(host)
	// prevent recursion: never filter our own lookup servers
	if host == d.parentalServer || host == d.safeBrowsingServer {
		return Result{}, nil
	}

	// build per-request settings from the global config, then let the host
	// application override them for this client (if a callback is set)
	var setts RequestFilteringSettings
	setts.FilteringEnabled = true
	setts.SafeSearchEnabled = d.SafeSearchEnabled
	setts.SafeBrowsingEnabled = d.SafeBrowsingEnabled
	setts.ParentalEnabled = d.ParentalEnabled
	if len(clientAddr) != 0 && d.FilterHandler != nil {
		d.FilterHandler(clientAddr, &setts)
	}

	var result Result
	var err error
	// try filter lists first
	if setts.FilteringEnabled {
		result, err = d.matchHost(host, qtype)
		if err != nil {
			return result, err
		}
		if result.Reason.Matched() {
			return result, nil
		}
	}

	// check safeSearch if no match
	if setts.SafeSearchEnabled {
		result, err = d.checkSafeSearch(host)
		if err != nil {
			// best-effort: a lookup failure does not fail the whole check
			log.Printf("Failed to safesearch HTTP lookup, ignoring check: %v", err)
			return Result{}, nil
		}

		if result.Reason.Matched() {
			return result, nil
		}
	}

	// check safebrowsing if no match
	if setts.SafeBrowsingEnabled {
		result, err = d.checkSafeBrowsing(host)
		if err != nil {
			// failed to do HTTP lookup -- treat it as if we got empty response, but don't save cache
			log.Printf("Failed to do safebrowsing HTTP lookup, ignoring check: %v", err)
			return Result{}, nil
		}
		if result.Reason.Matched() {
			return result, nil
		}
	}

	// check parental if no match
	if setts.ParentalEnabled {
		result, err = d.checkParental(host)
		if err != nil {
			// failed to do HTTP lookup -- treat it as if we got empty response, but don't save cache
			log.Printf("Failed to do parental HTTP lookup, ignoring check: %v", err)
			return Result{}, nil
		}
		if result.Reason.Matched() {
			return result, nil
		}
	}

	// nothing matched, return nothing
	return Result{}, nil
}
|
|
|
|
|
|
|
|
func getCachedReason(cache gcache.Cache, host string) (result Result, isFound bool, err error) {
|
|
|
|
isFound = false // not found yet
|
|
|
|
|
|
|
|
// get raw value
|
|
|
|
rawValue, err := cache.Get(host)
|
|
|
|
if err == gcache.KeyNotFoundError {
|
|
|
|
// not a real error, just not found
|
|
|
|
err = nil
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if err != nil {
|
|
|
|
// real error
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// since it can be something else, validate that it belongs to proper type
|
|
|
|
cachedValue, ok := rawValue.(Result)
|
2018-09-14 16:50:56 +03:00
|
|
|
if !ok {
|
2018-08-30 17:25:33 +03:00
|
|
|
// this is not our type -- error
|
|
|
|
text := "SHOULD NOT HAPPEN: entry with invalid type was found in lookup cache"
|
|
|
|
log.Println(text)
|
|
|
|
err = errors.New(text)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
isFound = ok
|
|
|
|
return cachedValue, isFound, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// for each dot, hash it and add it to string
|
|
|
|
func hostnameToHashParam(host string, addslash bool) (string, map[string]bool) {
|
|
|
|
var hashparam bytes.Buffer
|
|
|
|
hashes := map[string]bool{}
|
|
|
|
tld, icann := publicsuffix.PublicSuffix(host)
|
2018-09-14 16:50:56 +03:00
|
|
|
if !icann {
|
2018-08-30 17:25:33 +03:00
|
|
|
// private suffixes like cloudfront.net
|
|
|
|
tld = ""
|
|
|
|
}
|
|
|
|
curhost := host
|
|
|
|
for {
|
|
|
|
if curhost == "" {
|
|
|
|
// we've reached end of string
|
|
|
|
break
|
|
|
|
}
|
|
|
|
if tld != "" && curhost == tld {
|
|
|
|
// we've reached the TLD, don't hash it
|
|
|
|
break
|
|
|
|
}
|
|
|
|
tohash := []byte(curhost)
|
|
|
|
if addslash {
|
|
|
|
tohash = append(tohash, '/')
|
|
|
|
}
|
|
|
|
sum := sha256.Sum256(tohash)
|
|
|
|
hexhash := fmt.Sprintf("%X", sum)
|
|
|
|
hashes[hexhash] = true
|
|
|
|
hashparam.WriteString(fmt.Sprintf("%02X%02X%02X%02X/", sum[0], sum[1], sum[2], sum[3]))
|
|
|
|
pos := strings.IndexByte(curhost, byte('.'))
|
|
|
|
if pos < 0 {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
curhost = curhost[pos+1:]
|
|
|
|
}
|
|
|
|
return hashparam.String(), hashes
|
|
|
|
}
|
|
|
|
|
2019-02-22 16:34:36 +03:00
|
|
|
// checkSafeSearch checks whether host is a known search engine and, if so,
// returns a FilteredSafeSearch result pointing at its safe-search IPv4
// address.  Results (including "replacement domain resolved") are cached.
func (d *Dnsfilter) checkSafeSearch(host string) (Result, error) {
	if log.GetLevel() >= log.DEBUG {
		timer := log.StartTimer()
		defer timer.LogElapsed("SafeSearch HTTP lookup for %s", host)
	}

	// Check cache. Return cached result if it was found
	cachedValue, isFound, err := getCachedReason(gctx.safeSearchCache, host)
	if isFound {
		atomic.AddUint64(&gctx.stats.Safesearch.CacheHits, 1)
		log.Tracef("%s: found in SafeSearch cache", host)
		return cachedValue, nil
	}

	if err != nil {
		return Result{}, err
	}

	safeHost, ok := d.SafeSearchDomain(host)
	if !ok {
		// not a search engine (or safesearch is disabled) -- no match
		return Result{}, nil
	}

	res := Result{IsFiltered: true, Reason: FilteredSafeSearch}
	if ip := net.ParseIP(safeHost); ip != nil {
		// replacement is already an IP literal -- no DNS lookup needed
		res.IP = ip
		// NOTE: a cache-store failure is deliberately swallowed here
		err = gctx.safeSearchCache.Set(host, res)
		if err != nil {
			return Result{}, nil
		}

		return res, nil
	}

	// TODO this address should be resolved with upstream that was configured in dnsforward
	addrs, err := net.LookupIP(safeHost)
	if err != nil {
		log.Tracef("SafeSearchDomain for %s was found but failed to lookup for %s cause %s", host, safeHost, err)
		return Result{}, err
	}

	// pick the first IPv4 address from the answer
	for _, i := range addrs {
		if ipv4 := i.To4(); ipv4 != nil {
			res.IP = ipv4
			break
		}
	}

	if len(res.IP) == 0 {
		return Result{}, fmt.Errorf("no ipv4 addresses in safe search response for %s", safeHost)
	}

	// Cache result
	err = gctx.safeSearchCache.Set(host, res)
	if err != nil {
		return Result{}, nil
	}
	return res, nil
}
|
|
|
|
|
2018-08-30 17:25:33 +03:00
|
|
|
// checkSafeBrowsing queries the safebrowsing server with hash prefixes of the
// host and parses its plain-text "rule:...:hash" response.  The heavy lifting
// (caching, HTTP, stats) is shared with checkParental via lookupCommon.
func (d *Dnsfilter) checkSafeBrowsing(host string) (Result, error) {
	if log.GetLevel() >= log.DEBUG {
		timer := log.StartTimer()
		defer timer.LogElapsed("SafeBrowsing HTTP lookup for %s", host)
	}

	// build the lookup URL from the hash-prefix parameter
	format := func(hashparam string) string {
		schema := "https"
		if d.UsePlainHTTP {
			schema = "http"
		}
		url := fmt.Sprintf(defaultSafebrowsingURL, schema, d.safeBrowsingServer, hashparam)
		return url
	}
	// parse the response: one "rule:?:hash" entry per line; a hit is any line
	// whose hash matches one of the hashes we computed for this host
	handleBody := func(body []byte, hashes map[string]bool) (Result, error) {
		result := Result{}
		scanner := bufio.NewScanner(strings.NewReader(string(body)))
		for scanner.Scan() {
			line := scanner.Text()
			splitted := strings.Split(line, ":")
			if len(splitted) < 3 {
				continue
			}
			hash := splitted[2]
			if _, ok := hashes[hash]; ok {
				// it's in the hash
				result.IsFiltered = true
				result.Reason = FilteredSafeBrowsing
				result.Rule = splitted[0]
				break
			}
		}

		if err := scanner.Err(); err != nil {
			// error, don't save cache
			return Result{}, err
		}
		return result, nil
	}
	result, err := d.lookupCommon(host, &gctx.stats.Safebrowsing, gctx.safebrowsingCache, true, format, handleBody)
	return result, err
}
|
|
|
|
|
|
|
|
// checkParental queries the parental-control server with hash prefixes of the
// host and parses its JSON response.  Caching, HTTP and stats are shared with
// checkSafeBrowsing via lookupCommon.
func (d *Dnsfilter) checkParental(host string) (Result, error) {
	if log.GetLevel() >= log.DEBUG {
		timer := log.StartTimer()
		defer timer.LogElapsed("Parental HTTP lookup for %s", host)
	}

	// build the lookup URL, defaulting the sensitivity when unset
	format := func(hashparam string) string {
		schema := "https"
		if d.UsePlainHTTP {
			schema = "http"
		}
		sensitivity := d.ParentalSensitivity
		if sensitivity == 0 {
			sensitivity = defaultParentalSensitivity
		}
		url := fmt.Sprintf(defaultParentalURL, schema, d.parentalServer, hashparam, sensitivity)
		return url
	}
	handleBody := func(body []byte, hashes map[string]bool) (Result, error) {
		// parse json
		var m []struct {
			Blocked   bool   `json:"blocked"`
			ClientTTL int    `json:"clientTtl"`
			Reason    string `json:"reason"`
			Hash      string `json:"hash"`
		}
		err := json.Unmarshal(body, &m)
		if err != nil {
			// error, don't save cache
			log.Printf("Couldn't parse json '%s': %s", body, err)
			return Result{}, err
		}

		result := Result{}

		for i := range m {
			// only consider entries whose hash belongs to this host
			if !hashes[m[i].Hash] {
				continue
			}
			if m[i].Blocked {
				result.IsFiltered = true
				result.Reason = FilteredParental
				result.Rule = fmt.Sprintf("parental %s", m[i].Reason)
				break
			}
		}
		return result, nil
	}
	result, err := d.lookupCommon(host, &gctx.stats.Parental, gctx.parentalCache, false, format, handleBody)
	return result, err
}
|
|
|
|
|
2019-01-24 20:11:01 +03:00
|
|
|
// formatHandler builds the lookup URL from the hash-prefix parameter.
type formatHandler func(hashparam string) string

// bodyHandler parses a lookup server response body into a Result.
type bodyHandler func(body []byte, hashes map[string]bool) (Result, error)
|
2019-01-24 20:11:01 +03:00
|
|
|
|
2018-08-30 17:25:33 +03:00
|
|
|
// real implementation of lookup/check: shared by safebrowsing and parental.
// It consults the cache, performs the HTTP request (updating lookupstats),
// and caches successful results.  HTTP 204 means "definitely clean" and is
// cached as an empty Result; any other non-200 status yields an empty result
// without caching.
func (d *Dnsfilter) lookupCommon(host string, lookupstats *LookupStats, cache gcache.Cache, hashparamNeedSlash bool, format formatHandler, handleBody bodyHandler) (Result, error) {
	// if host ends with a dot, trim it
	host = strings.ToLower(strings.Trim(host, "."))

	// check cache
	cachedValue, isFound, err := getCachedReason(cache, host)
	if isFound {
		atomic.AddUint64(&lookupstats.CacheHits, 1)
		log.Tracef("%s: found in the lookup cache", host)
		return cachedValue, nil
	}
	if err != nil {
		return Result{}, err
	}

	// convert hostname to hash parameters
	hashparam, hashes := hostnameToHashParam(host, hashparamNeedSlash)

	// format URL with our hashes
	url := format(hashparam)

	// do HTTP request
	atomic.AddUint64(&lookupstats.Requests, 1)
	atomic.AddInt64(&lookupstats.Pending, 1)
	updateMax(&lookupstats.Pending, &lookupstats.PendingMax)
	resp, err := d.client.Get(url)
	atomic.AddInt64(&lookupstats.Pending, -1)
	if resp != nil && resp.Body != nil {
		// close the body even on error responses so the connection is reused
		defer resp.Body.Close()
	}
	if err != nil {
		// error, don't save cache
		return Result{}, err
	}

	// get body text
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		// error, don't save cache
		return Result{}, err
	}

	// handle status code
	switch {
	case resp.StatusCode == 204:
		// empty result, save cache
		err = cache.Set(host, Result{})
		if err != nil {
			return Result{}, err
		}
		return Result{}, nil
	case resp.StatusCode != 200:
		// error, don't save cache
		return Result{}, nil
	}

	result, err := handleBody(body, hashes)
	if err != nil {
		// error, don't save cache
		return Result{}, err
	}

	err = cache.Set(host, result)
	if err != nil {
		return Result{}, err
	}
	return result, nil
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// Adding rule and matching against the rules
|
|
|
|
//
|
|
|
|
|
2019-05-15 16:46:11 +03:00
|
|
|
// Initialize urlfilter objects.
// filters maps a filter list ID to either the rules text (ID 0 -- the
// "user rules" list kept in memory) or a path to a rules file (any other ID).
func (d *Dnsfilter) initFiltering(filters map[int]string) error {
	listArray := []urlfilter.RuleList{}
	for id, dataOrFilePath := range filters {
		var list urlfilter.RuleList
		if id == 0 {
			// ID 0 carries the rules text itself, not a file path
			list = &urlfilter.StringRuleList{
				ID:             0,
				RulesText:      dataOrFilePath,
				IgnoreCosmetic: false,
			}
		} else {
			var err error
			list, err = urlfilter.NewFileRuleList(id, dataOrFilePath, false)
			if err != nil {
				return fmt.Errorf("urlfilter.NewFileRuleList(): %s: %s", dataOrFilePath, err)
			}
		}
		listArray = append(listArray, list)
	}

	var err error
	d.rulesStorage, err = urlfilter.NewRuleStorage(listArray)
	if err != nil {
		return fmt.Errorf("urlfilter.NewRuleStorage(): %s", err)
	}
	d.filteringEngine = urlfilter.NewDNSEngine(d.rulesStorage)
	return nil
}
|
|
|
|
|
2018-08-30 17:25:33 +03:00
|
|
|
// matchHost is a low-level way to check only if hostname is filtered by rules, skipping expensive safebrowsing and parental lookups.
// For network rules the first matching rule decides (whitelist rules unblock);
// for hosts-file rules the returned IP is chosen to fit the query type (A/AAAA).
func (d *Dnsfilter) matchHost(host string, qtype uint16) (Result, error) {
	if d.filteringEngine == nil {
		// filtering was never initialized -- nothing to match against
		return Result{}, nil
	}

	rules, ok := d.filteringEngine.Match(host)
	if !ok {
		return Result{}, nil
	}

	log.Tracef("%d rules matched for host '%s'", len(rules), host)

	for _, rule := range rules {

		log.Tracef("Found rule for host '%s': '%s' list_id: %d",
			host, rule.Text(), rule.GetFilterListID())

		res := Result{}
		res.Reason = FilteredBlackList
		res.IsFiltered = true
		res.FilterID = int64(rule.GetFilterListID())
		res.Rule = rule.Text()

		if netRule, ok := rule.(*urlfilter.NetworkRule); ok {

			if netRule.Whitelist {
				res.Reason = NotFilteredWhiteList
				res.IsFiltered = false
			}
			// network rules are decisive either way
			return res, nil

		} else if hostRule, ok := rule.(*urlfilter.HostRule); ok {

			if qtype == dns.TypeA && hostRule.IP.To4() != nil {
				// either IPv4 or IPv4-mapped IPv6 address
				res.IP = hostRule.IP.To4()
				return res, nil

			} else if qtype == dns.TypeAAAA {
				ip4 := hostRule.IP.To4()
				if ip4 == nil {
					// a real IPv6 address answers an AAAA query directly
					res.IP = hostRule.IP
					return res, nil
				}
				if bytes.Equal(ip4, []byte{0, 0, 0, 0}) {
					// send IP="::" response for a rule "0.0.0.0 blockdomain"
					res.IP = net.IPv6zero
					return res, nil
				}
			}
			// rule's IP family doesn't fit the query type -- try the next rule
			continue

		} else {
			log.Tracef("Rule type is unsupported: '%s' list_id: %d",
				rule.Text(), rule.GetFilterListID())
		}
	}

	return Result{}, nil
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// lifecycle helper functions
|
|
|
|
//
|
|
|
|
|
2019-05-13 14:16:07 +03:00
|
|
|
// Return TRUE if this host's IP should be cached
|
2019-05-13 14:47:55 +03:00
|
|
|
func (d *Dnsfilter) shouldBeInDialCache(host string) bool {
|
2019-05-13 14:16:07 +03:00
|
|
|
return host == d.safeBrowsingServer ||
|
|
|
|
host == d.parentalServer
|
|
|
|
}
|
|
|
|
|
|
|
|
// Search for an IP address by host name
|
2019-05-13 14:47:55 +03:00
|
|
|
func searchInDialCache(host string) string {
|
2019-06-24 19:00:03 +03:00
|
|
|
rawValue, err := gctx.dialCache.Get(host)
|
2019-05-13 14:16:07 +03:00
|
|
|
if err != nil {
|
|
|
|
return ""
|
|
|
|
}
|
|
|
|
|
|
|
|
ip, _ := rawValue.(string)
|
|
|
|
log.Debug("Found in cache: %s -> %s", host, ip)
|
|
|
|
return ip
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add "hostname" -> "IP address" entry to cache
|
2019-05-13 14:47:55 +03:00
|
|
|
func addToDialCache(host, ip string) {
|
2019-06-24 19:00:03 +03:00
|
|
|
err := gctx.dialCache.Set(host, ip)
|
2019-05-30 15:36:39 +03:00
|
|
|
if err != nil {
|
|
|
|
log.Debug("dialCache.Set: %s", err)
|
|
|
|
}
|
2019-05-13 14:16:07 +03:00
|
|
|
log.Debug("Added to cache: %s -> %s", host, ip)
|
|
|
|
}
|
|
|
|
|
2019-04-24 12:49:12 +03:00
|
|
|
// dialFunctionType matches the http.Transport.DialContext signature.
type dialFunctionType func(ctx context.Context, network, addr string) (net.Conn, error)
|
|
|
|
|
2019-04-18 14:31:13 +03:00
|
|
|
// Connect to a remote server resolving hostname using our own DNS server.
// The returned dialer: uses the address as-is when it is an IP literal,
// consults the dial cache for our lookup servers, otherwise resolves via the
// configured upstream and tries each returned address in turn.
func (d *Dnsfilter) createCustomDialContext(resolverAddr string) dialFunctionType {
	return func(ctx context.Context, network, addr string) (net.Conn, error) {
		log.Tracef("network:%v addr:%v", network, addr)

		host, port, err := net.SplitHostPort(addr)
		if err != nil {
			return nil, err
		}

		dialer := &net.Dialer{
			Timeout: time.Minute * 5,
		}

		// IP literal: no resolution needed
		if net.ParseIP(host) != nil {
			con, err := dialer.DialContext(ctx, network, addr)
			return con, err
		}

		cache := d.shouldBeInDialCache(host)
		if cache {
			ip := searchInDialCache(host)
			if len(ip) != 0 {
				addr = fmt.Sprintf("%s:%s", ip, port)
				return dialer.DialContext(ctx, network, addr)
			}
		}

		// resolve through our own DNS server rather than the system resolver
		r := upstream.NewResolver(resolverAddr, 30*time.Second)
		addrs, e := r.LookupIPAddr(ctx, host)
		log.Tracef("LookupIPAddr: %s: %v", host, addrs)
		if e != nil {
			return nil, e
		}

		if len(addrs) == 0 {
			return nil, fmt.Errorf("couldn't lookup host: %s", host)
		}

		// try each address; the first successful connection wins
		var dialErrs []error
		for _, a := range addrs {
			addr = fmt.Sprintf("%s:%s", a.String(), port)
			con, err := dialer.DialContext(ctx, network, addr)
			if err != nil {
				dialErrs = append(dialErrs, err)
				continue
			}

			if cache {
				addToDialCache(host, a.String())
			}

			return con, err
		}
		return nil, errorx.DecorateMany(fmt.Sprintf("couldn't dial to %s", addr), dialErrs...)
	}
}
|
|
|
|
|
2018-09-14 16:50:56 +03:00
|
|
|
// New creates properly initialized DNS Filter that is ready to be used.
// c may be nil (all defaults); filters may be nil (no rule matching).
// Returns nil if the filtering subsystem fails to initialize.
func New(c *Config, filters map[int]string) *Dnsfilter {

	if c != nil {
		// initialize objects only once: caches are package-global (gctx)
		// and shared by all Dnsfilter instances
		if c.SafeBrowsingEnabled && gctx.safebrowsingCache == nil {
			gctx.safebrowsingCache = gcache.New(defaultCacheSize).LRU().Expiration(defaultCacheTime).Build()
		}
		if c.SafeSearchEnabled && gctx.safeSearchCache == nil {
			gctx.safeSearchCache = gcache.New(defaultCacheSize).LRU().Expiration(defaultCacheTime).Build()
		}
		if c.ParentalEnabled && gctx.parentalCache == nil {
			gctx.parentalCache = gcache.New(defaultCacheSize).LRU().Expiration(defaultCacheTime).Build()
		}
		if len(c.ResolverAddress) != 0 && gctx.dialCache == nil {
			gctx.dialCache = gcache.New(maxDialCacheSize).LRU().Expiration(defaultCacheTime).Build()
		}
	}

	d := new(Dnsfilter)

	// Customize the Transport to have larger connection pool,
	// We are not (re)using http.DefaultTransport because of race conditions found by tests
	d.transport = &http.Transport{
		Proxy:                 http.ProxyFromEnvironment,
		MaxIdleConns:          defaultHTTPMaxIdleConnections, // default 100
		MaxIdleConnsPerHost:   defaultHTTPMaxIdleConnections, // default 2
		IdleConnTimeout:       90 * time.Second,
		TLSHandshakeTimeout:   10 * time.Second,
		ExpectContinueTimeout: 1 * time.Second,
	}
	if c != nil && len(c.ResolverAddress) != 0 {
		// resolve lookup-server host names through our own DNS server
		d.transport.DialContext = d.createCustomDialContext(c.ResolverAddress)
	}
	d.client = http.Client{
		Transport: d.transport,
		Timeout:   defaultHTTPTimeout,
	}
	d.safeBrowsingServer = defaultSafebrowsingServer
	d.parentalServer = defaultParentalServer
	if c != nil {
		d.Config = *c
	}

	if filters != nil {
		err := d.initFiltering(filters)
		if err != nil {
			log.Error("Can't initialize filtering subsystem: %s", err)
			d.Destroy()
			return nil
		}
	}

	return d
}
|
|
|
|
|
2018-09-14 16:50:56 +03:00
|
|
|
// Destroy is optional if you want to tidy up goroutines without waiting for them to die off
|
|
|
|
// right now it closes idle HTTP connections if there are any
|
2018-08-30 17:25:33 +03:00
|
|
|
func (d *Dnsfilter) Destroy() {
|
2018-10-04 13:38:52 +03:00
|
|
|
if d != nil && d.transport != nil {
|
|
|
|
d.transport.CloseIdleConnections()
|
|
|
|
}
|
2019-05-15 16:46:11 +03:00
|
|
|
|
|
|
|
if d.rulesStorage != nil {
|
|
|
|
d.rulesStorage.Close()
|
|
|
|
d.rulesStorage = nil
|
|
|
|
}
|
2018-08-30 17:25:33 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// config manipulation helpers
|
|
|
|
//
|
|
|
|
|
2018-09-14 16:50:56 +03:00
|
|
|
// SetSafeBrowsingServer lets you optionally change hostname of safesearch lookup
|
2018-08-30 17:25:33 +03:00
|
|
|
func (d *Dnsfilter) SetSafeBrowsingServer(host string) {
|
|
|
|
if len(host) == 0 {
|
2018-11-30 13:32:51 +03:00
|
|
|
d.safeBrowsingServer = defaultSafebrowsingServer
|
2018-08-30 17:25:33 +03:00
|
|
|
} else {
|
2018-11-30 13:32:51 +03:00
|
|
|
d.safeBrowsingServer = host
|
2018-08-30 17:25:33 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-09-14 16:50:56 +03:00
|
|
|
// SetHTTPTimeout lets you optionally change timeout during lookups
func (d *Dnsfilter) SetHTTPTimeout(t time.Duration) {
	d.client.Timeout = t
}
|
|
|
|
|
2018-09-14 16:50:56 +03:00
|
|
|
// ResetHTTPTimeout resets lookup timeouts to the package default.
func (d *Dnsfilter) ResetHTTPTimeout() {
	d.client.Timeout = defaultHTTPTimeout
}
|
|
|
|
|
2018-09-14 16:50:56 +03:00
|
|
|
// SafeSearchDomain returns replacement address for search engine
|
2018-08-30 17:25:33 +03:00
|
|
|
func (d *Dnsfilter) SafeSearchDomain(host string) (string, bool) {
|
2018-11-30 13:32:51 +03:00
|
|
|
if d.SafeSearchEnabled {
|
2018-09-14 16:50:56 +03:00
|
|
|
val, ok := safeSearchDomains[host]
|
|
|
|
return val, ok
|
2018-08-30 17:25:33 +03:00
|
|
|
}
|
2018-09-14 16:50:56 +03:00
|
|
|
return "", false
|
2018-08-30 17:25:33 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
//
|
|
|
|
// stats
|
|
|
|
//
|
|
|
|
|
2018-09-14 16:50:56 +03:00
|
|
|
// GetStats return dns filtering stats since startup
|
2018-08-30 17:25:33 +03:00
|
|
|
func (d *Dnsfilter) GetStats() Stats {
|
2019-06-24 19:00:03 +03:00
|
|
|
return gctx.stats
|
2018-08-30 17:25:33 +03:00
|
|
|
}
|