Mirror of https://github.com/AdguardTeam/AdGuardHome.git, synced 2024-11-24 05:55:43 +03:00
Pull request: 2271 handle nolint

Merge in DNS/adguard-home from 2271-handle-nolint to master

Closes #2271.

Squashed commit of the following (all commits authored by Eugene Burkov <e.burkov@adguard.com>):

* fde5c8795ac79e1f7d02ba8c8e369b5a724a000e (merge of fc2acd898 and 642dcd647), Fri Nov 20 17:12:28 2020 +0300 - Merge branch 'master' into 2271-handle-nolint
* fc2acd89871de08c39e80ace9e5bb8a7acb7afba, Tue Nov 17 11:55:29 2020 +0300 - dnsforward: fix test output strings
* c4ebae6ea9c293bad239519c44ca5a6c576bb921, Mon Nov 16 22:43:20 2020 +0300 - dnsfilter: make package pass tests
* f2d98c6acabd8977f3b1b361987eaa31eb6eb9ad, Mon Nov 16 20:05:00 2020 +0300 - querylog: make decoding pass tests
* ab5850d24c50d53b8393f2de448cc340241351d7 (merge of 6ed2066bf and 8a9c6e8a0), Mon Nov 16 19:48:31 2020 +0300 - Merge branch 'master' into 2271-handle-nolint
* 6ed2066bf567e13dd14cfa16fc7b109b59fa39ef, Mon Nov 16 18:13:45 2020 +0300 - home: fix tests naming
* af691081fb02b7500a746b16492f01f7f9befe9a, Mon Nov 16 12:15:49 2020 +0300 - home: impove code quality
* 2914cd3cd23ef2a1964116baab9187d89b377f86, Wed Nov 11 15:46:39 2020 +0300 - * querylog: remove useless check
* 9996840650e784ccc76d1f29964560435ba27dc7, Wed Nov 11 13:18:34 2020 +0300 - * all: fix noticed defects
* 2b15293e59337f70302fbc0db81ebb26bee0bed2, Tue Nov 10 20:15:53 2020 +0300 - * stats: remove last nolint directive
* b2e1ddf7b58196a2fdbf879f084edb41ca1aa1eb, Tue Nov 10 18:35:41 2020 +0300 - * all: remove another nolint directive
* c6fc5cfcc9c95ab9e570a95ab41c3e5c0125e62e, Tue Nov 10 18:11:28 2020 +0300 - * querylog: remove nolint directive
* 226ddbf2c92f737f085b44a4ddf6daec7b602153, Tue Nov 10 16:35:26 2020 +0300 - * home: remove nolint directive
* 2ea3086ad41e9003282add7e996ae722d72d878b, Tue Nov 10 16:13:57 2020 +0300 - * home: reduce cyclomatic complexity of run function
* f479b480c48e0bb832ddef8f57586f56b8a55bab, Tue Nov 10 15:35:46 2020 +0300 - * home: use crypto/rand instead of math/rand
* a28d4a53e3b930136b036606fc7e78404f1d208b, Tue Nov 10 14:11:07 2020 +0300 - * dnsforward: remove gocyclo nolint directive
* 64a0a324cc2b20614ceec3ccc6505e960fe526e9, Tue Nov 10 11:45:49 2020 +0300 - all *: remove some nolint directives

Updates #2271.
Commit: 3045da1742
Parent: 642dcd647c
17 changed files with 1053 additions and 832 deletions
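
Several of the squashed commits above (for example "home: use crypto/rand instead of math/rand") remove nolint suppressions by rewriting the code the linters complained about rather than silencing them; the auth hunks below show the actual change to getSession. As a standalone illustration of the same pattern only, here is a minimal hypothetical Go sketch. The name newSessionToken and the surrounding program are invented for this example and are not part of the project:

package main

import (
	"crypto/rand"
	"crypto/sha256"
	"fmt"
	"math"
	"math/big"
)

// newSessionToken is a hypothetical helper: it derives a session token from a
// cryptographically random salt plus user data, mirroring the pattern the diff
// introduces in getSession.
func newSessionToken(name, passwordHash string) ([]byte, error) {
	// rand.Int returns a uniform random value in [0, max); errors come from
	// the OS entropy source and must be propagated instead of ignored.
	salt, err := rand.Int(rand.Reader, big.NewInt(math.MaxUint32))
	if err != nil {
		return nil, err
	}

	sum := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", salt, name, passwordHash)))
	return sum[:], nil
}

func main() {
	tok, err := newSessionToken("name", "hash")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", tok)
}
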
@@ -39,7 +39,6 @@ linters:
   - govet
   - ineffassign
   - staticcheck
-  - structcheck
   - unused
   - varcheck
   - bodyclose
@@ -177,6 +177,16 @@ func (r Reason) String() string {
 	return reasonNames[r]
 }
 
+// In returns true if reasons include r.
+func (r Reason) In(reasons ...Reason) bool {
+	for _, reason := range reasons {
+		if r == reason {
+			return true
+		}
+	}
+	return false
+}
+
 // GetConfig - get configuration
 func (d *Dnsfilter) GetConfig() RequestFilteringSettings {
 	c := RequestFilteringSettings{}
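
The hunk above adds an In helper to Reason. The following self-contained toy mirrors that method so it can be run on its own; the Reason values are reduced to a made-up subset and the surrounding program is invented, but the body of In has the same shape as the one added in the diff:

package main

import "fmt"

// Reason mirrors the dnsfilter type only loosely, for illustration.
type Reason int

const (
	NotFilteredNotFound Reason = iota
	FilteredSafeBrowsing
	FilteredParental
)

// In reports whether r is one of reasons, the same shape as the method added above.
func (r Reason) In(reasons ...Reason) bool {
	for _, reason := range reasons {
		if r == reason {
			return true
		}
	}
	return false
}

func main() {
	r := FilteredParental
	fmt.Println(r.In(FilteredSafeBrowsing, FilteredParental)) // true
	fmt.Println(r.In(NotFilteredNotFound))                    // false
}
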
@@ -71,33 +71,37 @@ func (c *sbCtx) setCache(prefix, hashes []byte) {
 	log.Debug("%s: stored in cache: %v", c.svc, prefix)
 }
 
+// findInHash returns 32-byte hash if it's found in hashToHost.
+func (c *sbCtx) findInHash(val []byte) (hash32 [32]byte, found bool) {
+	for i := 4; i < len(val); i += 32 {
+		hash := val[i : i+32]
+
+		copy(hash32[:], hash[0:32])
+
+		_, found = c.hashToHost[hash32]
+		if found {
+			return hash32, found
+		}
+	}
+
+	return [32]byte{}, false
+}
+
 func (c *sbCtx) getCached() int {
 	now := time.Now().Unix()
 	hashesToRequest := map[[32]byte]string{}
 	for k, v := range c.hashToHost {
 		key := k[0:2]
 		val := c.cache.Get(key)
-		if val != nil {
-			expire := binary.BigEndian.Uint32(val)
-			if now >= int64(expire) {
-				val = nil
-			} else {
-				for i := 4; i < len(val); i += 32 {
-					hash := val[i : i+32]
-					var hash32 [32]byte
-					copy(hash32[:], hash[0:32])
-					_, found := c.hashToHost[hash32]
-					if found {
-						log.Debug("%s: found in cache: %s: blocked by %v", c.svc, c.host, hash32)
-						return 1
-					}
-				}
-			}
-		}
-		if val == nil {
-			hashesToRequest[k] = v
-		}
+		if val == nil || now >= int64(binary.BigEndian.Uint32(val)) {
+			hashesToRequest[k] = v
+			continue
+		}
+		if hash32, found := c.findInHash(val); found {
+			log.Debug("%s: found in cache: %s: blocked by %v", c.svc, c.host, hash32)
+			return 1
+		}
 	}
 
 	if len(hashesToRequest) == 0 {
 		log.Debug("%s: found in cache: %s: not blocked", c.svc, c.host)
@@ -254,106 +258,71 @@ func (c *sbCtx) storeCache(hashes [][]byte) {
 	}
 }
 
-// Disabling "dupl": the algorithm of SB/PC is similar, but it uses different data
-// nolint:dupl
+func check(c *sbCtx, r Result, u upstream.Upstream) (Result, error) {
+	c.hashToHost = hostnameToHashes(c.host)
+	switch c.getCached() {
+	case -1:
+		return Result{}, nil
+	case 1:
+		return r, nil
+	}
+
+	question := c.getQuestion()
+
+	log.Tracef("%s: checking %s: %s", c.svc, c.host, question)
+	req := (&dns.Msg{}).SetQuestion(question, dns.TypeTXT)
+
+	resp, err := u.Exchange(req)
+	if err != nil {
+		return Result{}, err
+	}
+
+	matched, receivedHashes := c.processTXT(resp)
+
+	c.storeCache(receivedHashes)
+	if matched {
+		return r, nil
+	}
+
+	return Result{}, nil
+}
+
 func (d *Dnsfilter) checkSafeBrowsing(host string) (Result, error) {
 	if log.GetLevel() >= log.DEBUG {
 		timer := log.StartTimer()
 		defer timer.LogElapsed("SafeBrowsing lookup for %s", host)
 	}
 
-	result := Result{}
-	hashes := hostnameToHashes(host)
-
-	c := &sbCtx{
+	ctx := &sbCtx{
 		host: host,
 		svc: "SafeBrowsing",
-		hashToHost: hashes,
 		cache: gctx.safebrowsingCache,
 		cacheTime: d.Config.CacheTime,
 	}
 
-	// check cache
-	match := c.getCached()
-	if match < 0 {
-		return result, nil
-	} else if match > 0 {
-		result.IsFiltered = true
-		result.Reason = FilteredSafeBrowsing
-		result.Rule = "adguard-malware-shavar"
-		return result, nil
-	}
+	res := Result{
+		IsFiltered: true,
+		Reason: FilteredSafeBrowsing,
+		Rule: "adguard-malware-shavar",
+	}
 
-	question := c.getQuestion()
-	log.Tracef("SafeBrowsing: checking %s: %s", host, question)
-
-	req := dns.Msg{}
-	req.SetQuestion(question, dns.TypeTXT)
-	resp, err := d.safeBrowsingUpstream.Exchange(&req)
-	if err != nil {
-		return result, err
-	}
-
-	matched, receivedHashes := c.processTXT(resp)
-	if matched {
-		result.IsFiltered = true
-		result.Reason = FilteredSafeBrowsing
-		result.Rule = "adguard-malware-shavar"
-	}
-	c.storeCache(receivedHashes)
-
-	return result, nil
+	return check(ctx, res, d.safeBrowsingUpstream)
 }
 
-// Disabling "dupl": the algorithm of SB/PC is similar, but it uses different data
-// nolint:dupl
 func (d *Dnsfilter) checkParental(host string) (Result, error) {
 	if log.GetLevel() >= log.DEBUG {
 		timer := log.StartTimer()
 		defer timer.LogElapsed("Parental lookup for %s", host)
 	}
 
-	result := Result{}
-	hashes := hostnameToHashes(host)
-
-	c := &sbCtx{
+	ctx := &sbCtx{
 		host: host,
 		svc: "Parental",
-		hashToHost: hashes,
 		cache: gctx.parentalCache,
 		cacheTime: d.Config.CacheTime,
 	}
 
-	// check cache
-	match := c.getCached()
-	if match < 0 {
-		return result, nil
-	} else if match > 0 {
-		result.IsFiltered = true
-		result.Reason = FilteredParental
-		result.Rule = "parental CATEGORY_BLACKLISTED"
-		return result, nil
-	}
+	res := Result{
+		IsFiltered: true,
+		Reason: FilteredParental,
+		Rule: "parental CATEGORY_BLACKLISTED",
+	}
 
-	question := c.getQuestion()
-	log.Tracef("Parental: checking %s: %s", host, question)
-
-	req := dns.Msg{}
-	req.SetQuestion(question, dns.TypeTXT)
-	resp, err := d.parentalUpstream.Exchange(&req)
-	if err != nil {
-		return result, err
-	}
-
-	matched, receivedHashes := c.processTXT(resp)
-	if matched {
-		result.IsFiltered = true
-		result.Reason = FilteredParental
-		result.Rule = "parental CATEGORY_BLACKLISTED"
-	}
-	c.storeCache(receivedHashes)
-
-	return result, err
+	return check(ctx, res, d.parentalUpstream)
 }
 
 func httpError(r *http.Request, w http.ResponseWriter, code int, format string, args ...interface{}) {
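
The hunk above retires the nolint:dupl comments by moving the shared SafeBrowsing/Parental flow into a single check helper, so checkSafeBrowsing and checkParental only supply per-service data (the sbCtx, the Result, and the upstream). Below is a small self-contained toy of the same "share the algorithm, parameterize the data" refactor; every name and the lookup logic are made up for illustration and are not the project's code:

package main

import "fmt"

// lookup is the shared algorithm; svc and blockRule are the per-service data,
// in the spirit of check(c, r, u) above.
func lookup(svc, host, blockRule string, blocked func(string) bool) string {
	if blocked(host) {
		return fmt.Sprintf("%s: %s blocked by %s", svc, host, blockRule)
	}
	return fmt.Sprintf("%s: %s not blocked", svc, host)
}

func checkSafeBrowsing(host string) string {
	return lookup("SafeBrowsing", host, "adguard-malware-shavar", func(h string) bool { return h == "malware.example" })
}

func checkParental(host string) string {
	return lookup("Parental", host, "parental CATEGORY_BLACKLISTED", func(h string) bool { return h == "adult.example" })
}

func main() {
	fmt.Println(checkSafeBrowsing("malware.example"))
	fmt.Println(checkParental("example.org"))
}
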
@@ -94,19 +94,24 @@ type FilteringConfig struct {
 type TLSConfig struct {
 	TLSListenAddr *net.TCPAddr `yaml:"-" json:"-"`
 	QUICListenAddr *net.UDPAddr `yaml:"-" json:"-"`
-	StrictSNICheck bool `yaml:"strict_sni_check" json:"-"` // Reject connection if the client uses server name (in SNI) that doesn't match the certificate
 
-	CertificateChain string `yaml:"certificate_chain" json:"certificate_chain"` // PEM-encoded certificates chain
-	PrivateKey string `yaml:"private_key" json:"private_key"` // PEM-encoded private key
+	// Reject connection if the client uses server name (in SNI) that doesn't match the certificate
+	StrictSNICheck bool `yaml:"strict_sni_check" json:"-"`
 
-	CertificatePath string `yaml:"certificate_path" json:"certificate_path"` // certificate file name
-	PrivateKeyPath string `yaml:"private_key_path" json:"private_key_path"` // private key file name
+	// PEM-encoded certificates chain
+	CertificateChain string `yaml:"certificate_chain" json:"certificate_chain"`
+	// PEM-encoded private key
+	PrivateKey string `yaml:"private_key" json:"private_key"`
+
+	CertificatePath string `yaml:"certificate_path" json:"certificate_path"`
+	PrivateKeyPath string `yaml:"private_key_path" json:"private_key_path"`
 
 	CertificateChainData []byte `yaml:"-" json:"-"`
 	PrivateKeyData []byte `yaml:"-" json:"-"`
 
-	cert tls.Certificate // nolint(structcheck) - linter thinks that this field is unused, while TLSConfig is directly included into ServerConfig
-	dnsNames []string // nolint(structcheck) // DNS names from certificate (SAN) or CN value from Subject
+	cert tls.Certificate
+	// DNS names from certificate (SAN) or CN value from Subject
+	dnsNames []string
 }
 
 // ServerConfig represents server configuration.
@@ -9,7 +9,6 @@ import (
 	"strings"
 
 	"github.com/AdguardTeam/dnsproxy/upstream"
-	"github.com/AdguardTeam/golibs/jsonutil"
 	"github.com/AdguardTeam/golibs/log"
 	"github.com/AdguardTeam/golibs/utils"
 	"github.com/miekg/dns"
@@ -21,232 +20,292 @@ func httpError(r *http.Request, w http.ResponseWriter, code int, format string,
 	http.Error(w, text, code)
 }
 
-type dnsConfigJSON struct {
-	Upstreams []string `json:"upstream_dns"`
-	UpstreamsFile string `json:"upstream_dns_file"`
-	Bootstraps []string `json:"bootstrap_dns"`
+type dnsConfig struct {
+	Upstreams *[]string `json:"upstream_dns"`
+	UpstreamsFile *string `json:"upstream_dns_file"`
+	Bootstraps *[]string `json:"bootstrap_dns"`
 
-	ProtectionEnabled bool `json:"protection_enabled"`
-	RateLimit uint32 `json:"ratelimit"`
-	BlockingMode string `json:"blocking_mode"`
-	BlockingIPv4 string `json:"blocking_ipv4"`
-	BlockingIPv6 string `json:"blocking_ipv6"`
-	EDNSCSEnabled bool `json:"edns_cs_enabled"`
-	DNSSECEnabled bool `json:"dnssec_enabled"`
-	DisableIPv6 bool `json:"disable_ipv6"`
-	UpstreamMode string `json:"upstream_mode"`
-	CacheSize uint32 `json:"cache_size"`
-	CacheMinTTL uint32 `json:"cache_ttl_min"`
-	CacheMaxTTL uint32 `json:"cache_ttl_max"`
+	ProtectionEnabled *bool `json:"protection_enabled"`
+	RateLimit *uint32 `json:"ratelimit"`
+	BlockingMode *string `json:"blocking_mode"`
+	BlockingIPv4 *string `json:"blocking_ipv4"`
+	BlockingIPv6 *string `json:"blocking_ipv6"`
+	EDNSCSEnabled *bool `json:"edns_cs_enabled"`
+	DNSSECEnabled *bool `json:"dnssec_enabled"`
+	DisableIPv6 *bool `json:"disable_ipv6"`
+	UpstreamMode *string `json:"upstream_mode"`
+	CacheSize *uint32 `json:"cache_size"`
+	CacheMinTTL *uint32 `json:"cache_ttl_min"`
+	CacheMaxTTL *uint32 `json:"cache_ttl_max"`
+}
+
+func (s *Server) getDNSConfig() dnsConfig {
+	s.RLock()
+	upstreams := stringArrayDup(s.conf.UpstreamDNS)
+	upstreamFile := s.conf.UpstreamDNSFileName
+	bootstraps := stringArrayDup(s.conf.BootstrapDNS)
+	protectionEnabled := s.conf.ProtectionEnabled
+	blockingMode := s.conf.BlockingMode
+	BlockingIPv4 := s.conf.BlockingIPv4
+	BlockingIPv6 := s.conf.BlockingIPv6
+	Ratelimit := s.conf.Ratelimit
+	EnableEDNSClientSubnet := s.conf.EnableEDNSClientSubnet
+	EnableDNSSEC := s.conf.EnableDNSSEC
+	AAAADisabled := s.conf.AAAADisabled
+	CacheSize := s.conf.CacheSize
+	CacheMinTTL := s.conf.CacheMinTTL
+	CacheMaxTTL := s.conf.CacheMaxTTL
+	var upstreamMode string
+	if s.conf.FastestAddr {
+		upstreamMode = "fastest_addr"
+	} else if s.conf.AllServers {
+		upstreamMode = "parallel"
+	}
+	s.RUnlock()
+	return dnsConfig{
+		Upstreams: &upstreams,
+		UpstreamsFile: &upstreamFile,
+		Bootstraps: &bootstraps,
+		ProtectionEnabled: &protectionEnabled,
+		BlockingMode: &blockingMode,
+		BlockingIPv4: &BlockingIPv4,
+		BlockingIPv6: &BlockingIPv6,
+		RateLimit: &Ratelimit,
+		EDNSCSEnabled: &EnableEDNSClientSubnet,
+		DNSSECEnabled: &EnableDNSSEC,
+		DisableIPv6: &AAAADisabled,
+		CacheSize: &CacheSize,
+		CacheMinTTL: &CacheMinTTL,
+		CacheMaxTTL: &CacheMaxTTL,
+		UpstreamMode: &upstreamMode,
+	}
 }
 
 func (s *Server) handleGetConfig(w http.ResponseWriter, r *http.Request) {
-	resp := dnsConfigJSON{}
-	s.RLock()
-	resp.Upstreams = stringArrayDup(s.conf.UpstreamDNS)
-	resp.UpstreamsFile = s.conf.UpstreamDNSFileName
-	resp.Bootstraps = stringArrayDup(s.conf.BootstrapDNS)
-
-	resp.ProtectionEnabled = s.conf.ProtectionEnabled
-	resp.BlockingMode = s.conf.BlockingMode
-	resp.BlockingIPv4 = s.conf.BlockingIPv4
-	resp.BlockingIPv6 = s.conf.BlockingIPv6
-	resp.RateLimit = s.conf.Ratelimit
-	resp.EDNSCSEnabled = s.conf.EnableEDNSClientSubnet
-	resp.DNSSECEnabled = s.conf.EnableDNSSEC
-	resp.DisableIPv6 = s.conf.AAAADisabled
-	resp.CacheSize = s.conf.CacheSize
-	resp.CacheMinTTL = s.conf.CacheMinTTL
-	resp.CacheMaxTTL = s.conf.CacheMaxTTL
-	if s.conf.FastestAddr {
-		resp.UpstreamMode = "fastest_addr"
-	} else if s.conf.AllServers {
-		resp.UpstreamMode = "parallel"
-	}
-	s.RUnlock()
-
-	js, err := json.Marshal(resp)
-	if err != nil {
-		httpError(r, w, http.StatusInternalServerError, "json.Marshal: %s", err)
-		return
-	}
+	resp := s.getDNSConfig()
 	w.Header().Set("Content-Type", "application/json")
-	_, _ = w.Write(js)
+
+	enc := json.NewEncoder(w)
+	if err := enc.Encode(resp); err != nil {
+		httpError(r, w, http.StatusInternalServerError, "json.Encoder: %s", err)
+		return
+	}
 }
 
-func checkBlockingMode(req dnsConfigJSON) bool {
-	bm := req.BlockingMode
-	if !(bm == "default" || bm == "refused" || bm == "nxdomain" || bm == "null_ip" || bm == "custom_ip") {
-		return false
-	}
-
-	if bm == "custom_ip" {
-		ip := net.ParseIP(req.BlockingIPv4)
-		if ip == nil || ip.To4() == nil {
-			return false
-		}
-
-		ip = net.ParseIP(req.BlockingIPv6)
-		if ip == nil {
-			return false
-		}
-	}
-
-	return true
-}
-
-// Validate bootstrap server address
-func checkBootstrap(addr string) error {
-	if addr == "" { // additional check is required because NewResolver() allows empty address
-		return fmt.Errorf("invalid bootstrap server address: empty")
-	}
-	_, err := upstream.NewResolver(addr, 0)
-	if err != nil {
-		return fmt.Errorf("invalid bootstrap server address: %w", err)
-	}
-	return nil
-}
-
-// nolint(gocyclo) - we need to check each JSON field separately
+func (req *dnsConfig) checkBlockingMode() bool {
+	if req.BlockingMode == nil {
+		return true
+	}
+
+	bm := *req.BlockingMode
+	if bm == "custom_ip" {
+		if req.BlockingIPv4 == nil || req.BlockingIPv6 == nil {
+			return false
+		}
+
+		ip4 := net.ParseIP(*req.BlockingIPv4)
+		if ip4 == nil || ip4.To4() == nil {
+			return false
+		}
+
+		ip6 := net.ParseIP(*req.BlockingIPv6)
+		return ip6 != nil
+	}
+
+	for _, valid := range []string{
+		"default",
+		"refused",
+		"nxdomain",
+		"null_ip",
+	} {
+		if bm == valid {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (req *dnsConfig) checkUpstreamsMode() bool {
+	if req.UpstreamMode == nil {
+		return true
+	}
+
+	for _, valid := range []string{
+		"",
+		"fastest_addr",
+		"parallel",
+	} {
+		if *req.UpstreamMode == valid {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (req *dnsConfig) checkBootstrap() (string, error) {
+	if req.Bootstraps == nil {
+		return "", nil
+	}
+
+	for _, boot := range *req.Bootstraps {
+		if boot == "" {
+			return boot, fmt.Errorf("invalid bootstrap server address: empty")
+		}
+
+		if _, err := upstream.NewResolver(boot, 0); err != nil {
+			return boot, fmt.Errorf("invalid bootstrap server address: %w", err)
+		}
+	}
+
+	return "", nil
+}
+
+func (req *dnsConfig) checkCacheTTL() bool {
+	if req.CacheMinTTL == nil && req.CacheMaxTTL == nil {
+		return true
+	}
+	var min, max uint32
+	if req.CacheMinTTL != nil {
+		min = *req.CacheMinTTL
+	}
+	if req.CacheMaxTTL == nil {
+		max = *req.CacheMaxTTL
+	}
+
+	return min <= max
+}
+
 func (s *Server) handleSetConfig(w http.ResponseWriter, r *http.Request) {
-	req := dnsConfigJSON{}
-	js, err := jsonutil.DecodeObject(&req, r.Body)
-	if err != nil {
-		httpError(r, w, http.StatusBadRequest, "json.Decode: %s", err)
+	req := dnsConfig{}
+	dec := json.NewDecoder(r.Body)
+	if err := dec.Decode(&req); err != nil {
+		httpError(r, w, http.StatusBadRequest, "json Encode: %s", err)
 		return
 	}
 
-	if js.Exists("upstream_dns") {
-		err = ValidateUpstreams(req.Upstreams)
-		if err != nil {
+	if req.Upstreams != nil {
+		if err := ValidateUpstreams(*req.Upstreams); err != nil {
 			httpError(r, w, http.StatusBadRequest, "wrong upstreams specification: %s", err)
 			return
 		}
 	}
 
-	if js.Exists("bootstrap_dns") {
-		for _, boot := range req.Bootstraps {
-			if err := checkBootstrap(boot); err != nil {
-				httpError(r, w, http.StatusBadRequest, "%s can not be used as bootstrap dns cause: %s", boot, err)
-				return
-			}
-		}
-	}
+	if errBoot, err := req.checkBootstrap(); err != nil {
+		httpError(r, w, http.StatusBadRequest, "%s can not be used as bootstrap dns cause: %s", errBoot, err)
+		return
+	}
 
-	if js.Exists("blocking_mode") && !checkBlockingMode(req) {
+	if !req.checkBlockingMode() {
 		httpError(r, w, http.StatusBadRequest, "blocking_mode: incorrect value")
 		return
 	}
 
-	if js.Exists("upstream_mode") &&
-		!(req.UpstreamMode == "" || req.UpstreamMode == "fastest_addr" || req.UpstreamMode == "parallel") {
+	if !req.checkUpstreamsMode() {
 		httpError(r, w, http.StatusBadRequest, "upstream_mode: incorrect value")
 		return
 	}
 
-	if req.CacheMinTTL > req.CacheMaxTTL {
+	if !req.checkCacheTTL() {
 		httpError(r, w, http.StatusBadRequest, "cache_ttl_min must be less or equal than cache_ttl_max")
 		return
 	}
 
-	restart := false
-	s.Lock()
-
-	if js.Exists("upstream_dns") {
-		s.conf.UpstreamDNS = req.Upstreams
-		restart = true
-	}
-
-	if js.Exists("upstream_dns_file") {
-		s.conf.UpstreamDNSFileName = req.UpstreamsFile
-		restart = true
-	}
-
-	if js.Exists("bootstrap_dns") {
-		s.conf.BootstrapDNS = req.Bootstraps
-		restart = true
-	}
-
-	if js.Exists("protection_enabled") {
-		s.conf.ProtectionEnabled = req.ProtectionEnabled
-	}
-
-	if js.Exists("blocking_mode") {
-		s.conf.BlockingMode = req.BlockingMode
-		if req.BlockingMode == "custom_ip" {
-			if js.Exists("blocking_ipv4") {
-				s.conf.BlockingIPv4 = req.BlockingIPv4
-				s.conf.BlockingIPAddrv4 = net.ParseIP(req.BlockingIPv4)
-			}
-			if js.Exists("blocking_ipv6") {
-				s.conf.BlockingIPv6 = req.BlockingIPv6
-				s.conf.BlockingIPAddrv6 = net.ParseIP(req.BlockingIPv6)
-			}
-		}
-	}
-
-	if js.Exists("ratelimit") {
-		if s.conf.Ratelimit != req.RateLimit {
-			restart = true
-		}
-		s.conf.Ratelimit = req.RateLimit
-	}
-
-	if js.Exists("edns_cs_enabled") {
-		s.conf.EnableEDNSClientSubnet = req.EDNSCSEnabled
-		restart = true
-	}
-
-	if js.Exists("dnssec_enabled") {
-		s.conf.EnableDNSSEC = req.DNSSECEnabled
-	}
-
-	if js.Exists("disable_ipv6") {
-		s.conf.AAAADisabled = req.DisableIPv6
-	}
-
-	if js.Exists("cache_size") {
-		s.conf.CacheSize = req.CacheSize
-		restart = true
-	}
-
-	if js.Exists("cache_ttl_min") {
-		s.conf.CacheMinTTL = req.CacheMinTTL
-		restart = true
-	}
-
-	if js.Exists("cache_ttl_max") {
-		s.conf.CacheMaxTTL = req.CacheMaxTTL
-		restart = true
-	}
-
-	if js.Exists("upstream_mode") {
-		s.conf.FastestAddr = false
-		s.conf.AllServers = false
-		switch req.UpstreamMode {
-		case "":
-			//
-
-		case "parallel":
-			s.conf.AllServers = true
-
-		case "fastest_addr":
-			s.conf.FastestAddr = true
-		}
-	}
-
-	s.Unlock()
-	s.conf.ConfigModified()
-
-	if restart {
-		err = s.Reconfigure(nil)
-		if err != nil {
+	if s.setConfig(req) {
+		if err := s.Reconfigure(nil); err != nil {
 			httpError(r, w, http.StatusInternalServerError, "%s", err)
 			return
 		}
 	}
 }
 
+func (s *Server) setConfig(dc dnsConfig) (restart bool) {
+	s.Lock()
+
+	if dc.Upstreams != nil {
+		s.conf.UpstreamDNS = *dc.Upstreams
+		restart = true
+	}
+
+	if dc.UpstreamsFile != nil {
+		s.conf.UpstreamDNSFileName = *dc.UpstreamsFile
+		restart = true
+	}
+
+	if dc.Bootstraps != nil {
+		s.conf.BootstrapDNS = *dc.Bootstraps
+		restart = true
+	}
+
+	if dc.ProtectionEnabled != nil {
+		s.conf.ProtectionEnabled = *dc.ProtectionEnabled
+	}
+
+	if dc.BlockingMode != nil {
+		s.conf.BlockingMode = *dc.BlockingMode
+		if *dc.BlockingMode == "custom_ip" {
+			s.conf.BlockingIPv4 = *dc.BlockingIPv4
+			s.conf.BlockingIPAddrv4 = net.ParseIP(*dc.BlockingIPv4)
+			s.conf.BlockingIPv6 = *dc.BlockingIPv6
+			s.conf.BlockingIPAddrv6 = net.ParseIP(*dc.BlockingIPv6)
+		}
+	}
+
+	if dc.RateLimit != nil {
+		if s.conf.Ratelimit != *dc.RateLimit {
+			restart = true
+		}
+		s.conf.Ratelimit = *dc.RateLimit
+	}
+
+	if dc.EDNSCSEnabled != nil {
+		s.conf.EnableEDNSClientSubnet = *dc.EDNSCSEnabled
+		restart = true
+	}
+
+	if dc.DNSSECEnabled != nil {
+		s.conf.EnableDNSSEC = *dc.DNSSECEnabled
+	}
+
+	if dc.DisableIPv6 != nil {
+		s.conf.AAAADisabled = *dc.DisableIPv6
+	}
+
+	if dc.CacheSize != nil {
+		s.conf.CacheSize = *dc.CacheSize
+		restart = true
+	}
+
+	if dc.CacheMinTTL != nil {
+		s.conf.CacheMinTTL = *dc.CacheMinTTL
+		restart = true
+	}
+
+	if dc.CacheMaxTTL != nil {
+		s.conf.CacheMaxTTL = *dc.CacheMaxTTL
+		restart = true
+	}
+
+	if dc.UpstreamMode != nil {
+		switch *dc.UpstreamMode {
+		case "parallel":
+			s.conf.AllServers = true
+			s.conf.FastestAddr = false
+		case "fastest_addr":
+			s.conf.AllServers = false
+			s.conf.FastestAddr = true
+		default:
+			s.conf.AllServers = false
+			s.conf.FastestAddr = false
+		}
+	}
+	s.Unlock()
+	s.conf.ConfigModified()
+	return restart
+}
+
 type upstreamJSON struct {
 	Upstreams []string `json:"upstream_dns"` // Upstreams
 	BootstrapDNS []string `json:"bootstrap_dns"` // Bootstrap DNS
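
In the hunk above, the new dnsConfig type uses pointer fields so that a nil field means "this key was absent from the JSON request", which is what lets setConfig apply only the supplied settings and decide whether a restart is needed. The following is a minimal, self-contained sketch of that pattern under invented names (partialConfig, serverConf, apply); it is not the project's actual code:

package main

import (
	"encoding/json"
	"fmt"
)

// partialConfig mirrors the pointer-field approach of the new dnsConfig type:
// a nil field means the key was absent from the request body.
// Field names here are invented for the sketch.
type partialConfig struct {
	RateLimit   *uint32 `json:"ratelimit"`
	DisableIPv6 *bool   `json:"disable_ipv6"`
}

type serverConf struct {
	RateLimit   uint32
	DisableIPv6 bool
}

// apply copies only the fields that were present, reporting whether a setting
// that requires a restart actually changed, in the spirit of Server.setConfig.
func (c *serverConf) apply(p partialConfig) (restart bool) {
	if p.RateLimit != nil {
		restart = restart || c.RateLimit != *p.RateLimit
		c.RateLimit = *p.RateLimit
	}
	if p.DisableIPv6 != nil {
		c.DisableIPv6 = *p.DisableIPv6
	}
	return restart
}

func main() {
	conf := serverConf{RateLimit: 20}

	var req partialConfig
	_ = json.Unmarshal([]byte(`{"ratelimit":6}`), &req)

	fmt.Println(conf.apply(req), conf) // true {6 false}
}
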
@@ -29,7 +29,7 @@ func TestDNSForwardHTTTP_handleGetConfig(t *testing.T) {
 		conf: func() ServerConfig {
 			return defaultConf
 		},
-		want: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}",
+		want: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}\n",
 	}, {
 		name: "fastest_addr",
 		conf: func() ServerConfig {
@@ -37,7 +37,7 @@ func TestDNSForwardHTTTP_handleGetConfig(t *testing.T) {
 			conf.FastestAddr = true
 			return conf
 		},
-		want: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"fastest_addr\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}",
+		want: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"fastest_addr\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}\n",
 	}, {
 		name: "parallel",
 		conf: func() ServerConfig {
@@ -45,7 +45,7 @@ func TestDNSForwardHTTTP_handleGetConfig(t *testing.T) {
 			conf.AllServers = true
 			return conf
 		},
-		want: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"parallel\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}",
+		want: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"parallel\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}\n",
 	}}
 
 	for _, tc := range testCases {
@@ -73,7 +73,7 @@ func TestDNSForwardHTTTP_handleSetConfig(t *testing.T) {
 
 	w := httptest.NewRecorder()
 
-	const defaultConfJSON = "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}"
+	const defaultConfJSON = "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}\n"
 	testCases := []struct {
 		name string
 		req string
@@ -83,52 +83,52 @@ func TestDNSForwardHTTTP_handleSetConfig(t *testing.T) {
 		name: "upstream_dns",
 		req: "{\"upstream_dns\":[\"8.8.8.8:77\",\"8.8.4.4:77\"]}",
 		wantSet: "",
-		wantGet: "{\"upstream_dns\":[\"8.8.8.8:77\",\"8.8.4.4:77\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}",
+		wantGet: "{\"upstream_dns\":[\"8.8.8.8:77\",\"8.8.4.4:77\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}\n",
 	}, {
 		name: "bootstraps",
 		req: "{\"bootstrap_dns\":[\"9.9.9.10\"]}",
 		wantSet: "",
-		wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}",
+		wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}\n",
 	}, {
 		name: "blocking_mode_good",
 		req: "{\"blocking_mode\":\"refused\"}",
 		wantSet: "",
-		wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"refused\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}",
+		wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"refused\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}\n",
 	}, {
 		name: "blocking_mode_bad",
 		req: "{\"blocking_mode\":\"custom_ip\"}",
 		wantSet: "blocking_mode: incorrect value\n",
-		wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}",
+		wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}\n",
 	}, {
 		name: "ratelimit",
 		req: "{\"ratelimit\":6}",
 		wantSet: "",
-		wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":6,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}",
+		wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":6,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}\n",
 	}, {
 		name: "edns_cs_enabled",
 		req: "{\"edns_cs_enabled\":true}",
 		wantSet: "",
-		wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":true,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}",
+		wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":true,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}\n",
 	}, {
 		name: "dnssec_enabled",
 		req: "{\"dnssec_enabled\":true}",
 		wantSet: "",
-		wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":true,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}",
+		wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":true,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}\n",
 	}, {
 		name: "cache_size",
 		req: "{\"cache_size\":1024}",
 		wantSet: "",
-		wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":1024,\"cache_ttl_min\":0,\"cache_ttl_max\":0}",
+		wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":1024,\"cache_ttl_min\":0,\"cache_ttl_max\":0}\n",
 	}, {
 		name: "upstream_mode_parallel",
 		req: "{\"upstream_mode\":\"parallel\"}",
 		wantSet: "",
-		wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"parallel\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}",
+		wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"parallel\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}\n",
 	}, {
 		name: "upstream_mode_fastest_addr",
 		req: "{\"upstream_mode\":\"fastest_addr\"}",
 		wantSet: "",
-		wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"fastest_addr\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}",
+		wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"fastest_addr\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}\n",
 	}, {
 		name: "upstream_dns_bad",
 		req: "{\"upstream_dns\":[\"\"]}",
@@ -1,12 +1,14 @@
 package home
 
 import (
+	"crypto/rand"
 	"crypto/sha256"
 	"encoding/binary"
 	"encoding/hex"
 	"encoding/json"
 	"fmt"
-	"math/rand"
+	"math"
+	"math/big"
 	"net/http"
 	"strings"
 	"sync"
@@ -76,7 +78,6 @@ func InitAuth(dbFilename string, users []User, sessionTTL uint32) *Auth {
 	a := Auth{}
 	a.sessionTTL = sessionTTL
 	a.sessions = make(map[string]*session)
-	rand.Seed(time.Now().UTC().Unix())
 	var err error
 	a.db, err = bbolt.Open(dbFilename, 0o644, nil)
 	if err != nil {
@@ -275,23 +276,28 @@ type loginJSON struct {
 	Password string `json:"password"`
 }
 
-func getSession(u *User) []byte {
-	// the developers don't currently believe that using a
-	// non-cryptographic RNG for the session hash salt is
-	// insecure
-	salt := rand.Uint32() //nolint:gosec
-	d := []byte(fmt.Sprintf("%d%s%s", salt, u.Name, u.PasswordHash))
-	hash := sha256.Sum256(d)
-	return hash[:]
-}
-
-func (a *Auth) httpCookie(req loginJSON) string {
-	u := a.UserFind(req.Name, req.Password)
-	if len(u.Name) == 0 {
-		return ""
-	}
-
-	sess := getSession(&u)
+func getSession(u *User) ([]byte, error) {
+	maxSalt := big.NewInt(math.MaxUint32)
+	salt, err := rand.Int(rand.Reader, maxSalt)
+	if err != nil {
+		return nil, err
+	}
+
+	d := []byte(fmt.Sprintf("%s%s%s", salt, u.Name, u.PasswordHash))
+	hash := sha256.Sum256(d)
+	return hash[:], nil
+}
+
+func (a *Auth) httpCookie(req loginJSON) (string, error) {
+	u := a.UserFind(req.Name, req.Password)
+	if len(u.Name) == 0 {
+		return "", nil
+	}
+
+	sess, err := getSession(&u)
+	if err != nil {
+		return "", err
+	}
 
 	now := time.Now().UTC()
 	expire := now.Add(cookieTTL * time.Hour)
@@ -305,7 +311,7 @@ func (a *Auth) httpCookie(req loginJSON) string {
 	a.addSession(sess, &s)
 
 	return fmt.Sprintf("%s=%s; Path=/; HttpOnly; Expires=%s",
-		sessionCookieName, hex.EncodeToString(sess), expstr)
+		sessionCookieName, hex.EncodeToString(sess), expstr), nil
 }
 
 func handleLogin(w http.ResponseWriter, r *http.Request) {
@@ -316,7 +322,11 @@ func handleLogin(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	cookie := Context.auth.httpCookie(req)
+	cookie, err := Context.auth.httpCookie(req)
+	if err != nil {
+		httpError(w, http.StatusBadRequest, "crypto rand reader: %s", err)
+		return
+	}
 	if len(cookie) == 0 {
 		log.Info("Auth: invalid user name or password: name=%q", req.Name)
 		time.Sleep(1 * time.Second)
@@ -369,29 +379,10 @@ func parseCookie(cookie string) string {
 	return ""
 }
 
-// nolint(gocyclo)
-func optionalAuth(handler func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {
-	return func(w http.ResponseWriter, r *http.Request) {
-		if r.URL.Path == "/login.html" {
-			// redirect to dashboard if already authenticated
-			authRequired := Context.auth != nil && Context.auth.AuthRequired()
-			cookie, err := r.Cookie(sessionCookieName)
-			if authRequired && err == nil {
-				r := Context.auth.CheckSession(cookie.Value)
-				if r == 0 {
-					w.Header().Set("Location", "/")
-					w.WriteHeader(http.StatusFound)
-					return
-				} else if r < 0 {
-					log.Debug("Auth: invalid cookie value: %s", cookie)
-				}
-			}
-
-		} else if strings.HasPrefix(r.URL.Path, "/assets/") ||
-			strings.HasPrefix(r.URL.Path, "/login.") {
-			// process as usual
-			// no additional auth requirements
-		} else if Context.auth != nil && Context.auth.AuthRequired() {
+// optionalAuthThird return true if user should authenticate first.
+func optionalAuthThird(w http.ResponseWriter, r *http.Request) (authFirst bool) {
+	authFirst = false
+
 	// redirect to login page if not authenticated
 	ok := false
 	cookie, err := r.Cookie(sessionCookieName)
@@ -431,6 +422,34 @@ func optionalAuth(handler func(http.ResponseWriter, *http.Request)) func(http.Re
 		w.WriteHeader(http.StatusForbidden)
 		_, _ = w.Write([]byte("Forbidden"))
 	}
+		authFirst = true
+	}
+	return authFirst
+}
+
+func optionalAuth(handler func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {
+	return func(w http.ResponseWriter, r *http.Request) {
+		if r.URL.Path == "/login.html" {
+			// redirect to dashboard if already authenticated
+			authRequired := Context.auth != nil && Context.auth.AuthRequired()
+			cookie, err := r.Cookie(sessionCookieName)
+			if authRequired && err == nil {
+				r := Context.auth.CheckSession(cookie.Value)
+				if r == 0 {
+					w.Header().Set("Location", "/")
+					w.WriteHeader(http.StatusFound)
+					return
+				} else if r < 0 {
+					log.Debug("Auth: invalid cookie value: %s", cookie)
+				}
+			}
+
+		} else if strings.HasPrefix(r.URL.Path, "/assets/") ||
+			strings.HasPrefix(r.URL.Path, "/login.") {
+			// process as usual
+			// no additional auth requirements
+		} else if Context.auth != nil && Context.auth.AuthRequired() {
+			if optionalAuthThird(w, r) {
 				return
 			}
 		}
@@ -41,7 +41,8 @@ func TestAuth(t *testing.T) {
 	assert.True(t, a.CheckSession("notfound") == -1)
 	a.RemoveSession("notfound")
 
-	sess := getSession(&users[0])
+	sess, err := getSession(&users[0])
+	assert.Nil(t, err)
 	sessStr := hex.EncodeToString(sess)
 
 	now := time.Now().UTC().Unix()
@@ -136,7 +137,8 @@ func TestAuthHTTP(t *testing.T) {
 	assert.True(t, handlerCalled)
 
 	// perform login
-	cookie := Context.auth.httpCookie(loginJSON{Name: "name", Password: "password"})
+	cookie, err := Context.auth.httpCookie(loginJSON{Name: "name", Password: "password"})
+	assert.Nil(t, err)
 	assert.True(t, cookie != "")
 
 	// get /
@@ -88,60 +88,6 @@ func isRunning() bool {
 	return Context.dnsServer != nil && Context.dnsServer.IsRunning()
 }
 
-// nolint (gocyclo)
-// Return TRUE if IP is within public Internet IP range
-func isPublicIP(ip net.IP) bool {
-	ip4 := ip.To4()
-	if ip4 != nil {
-		switch ip4[0] {
-		case 0:
-			return false // software
-		case 10:
-			return false // private network
-		case 127:
-			return false // loopback
-		case 169:
-			if ip4[1] == 254 {
-				return false // link-local
-			}
-		case 172:
-			if ip4[1] >= 16 && ip4[1] <= 31 {
-				return false // private network
-			}
-		case 192:
-			if (ip4[1] == 0 && ip4[2] == 0) || // private network
-				(ip4[1] == 0 && ip4[2] == 2) || // documentation
-				(ip4[1] == 88 && ip4[2] == 99) || // reserved
-				(ip4[1] == 168) { // private network
-				return false
-			}
-		case 198:
-			if (ip4[1] == 18 || ip4[2] == 19) || // private network
-				(ip4[1] == 51 || ip4[2] == 100) { // documentation
-				return false
-			}
-		case 203:
-			if ip4[1] == 0 && ip4[2] == 113 { // documentation
-				return false
-			}
-		case 224:
-			if ip4[1] == 0 && ip4[2] == 0 { // multicast
-				return false
-			}
-		case 255:
-			if ip4[1] == 255 && ip4[2] == 255 && ip4[3] == 255 { // subnet
-				return false
-			}
-		}
-	} else {
-		if ip.IsLoopback() || ip.IsLinkLocalMulticast() || ip.IsLinkLocalUnicast() {
-			return false
-		}
-	}
-
-	return true
-}
-
 func onDNSRequest(d *proxy.DNSContext) {
 	ip := dnsforward.GetIPString(d.Addr)
 	if ip == "" {
@@ -153,7 +99,7 @@ func onDNSRequest(d *proxy.DNSContext) {
 	if !ipAddr.IsLoopback() {
 		Context.rdns.Begin(ip)
 	}
-	if isPublicIP(ipAddr) {
+	if !Context.ipDetector.detectSpecialNetwork(ipAddr) {
 		Context.whois.Begin(ip)
 	}
 }
@@ -327,7 +273,7 @@ func startDNSServer() error {
 		if !ipAddr.IsLoopback() {
 			Context.rdns.Begin(ip)
 		}
-		if isPublicIP(ipAddr) {
+		if !Context.ipDetector.detectSpecialNetwork(ipAddr) {
 			Context.whois.Begin(ip)
 		}
 	}
@@ -6,6 +6,7 @@ import (
 	"hash/crc32"
 	"io"
 	"io/ioutil"
+	"net/http"
 	"os"
 	"path/filepath"
 	"regexp"

@@ -497,46 +498,7 @@ func (f *Filtering) update(filter *filter) (bool, error) {
 	return b, err
 }

-// nolint(gocyclo)
-func (f *Filtering) updateIntl(filter *filter) (bool, error) {
-	log.Tracef("Downloading update for filter %d from %s", filter.ID, filter.URL)
-
-	tmpFile, err := ioutil.TempFile(filepath.Join(Context.getDataDir(), filterDir), "")
-	if err != nil {
-		return false, err
-	}
-	defer func() {
-		if tmpFile != nil {
-			_ = tmpFile.Close()
-			_ = os.Remove(tmpFile.Name())
-		}
-	}()
-
-	var reader io.Reader
-	if filepath.IsAbs(filter.URL) {
-		f, err := os.Open(filter.URL)
-		if err != nil {
-			return false, fmt.Errorf("open file: %w", err)
-		}
-		defer f.Close()
-		reader = f
-	} else {
-		resp, err := Context.client.Get(filter.URL)
-		if resp != nil && resp.Body != nil {
-			defer resp.Body.Close()
-		}
-		if err != nil {
-			log.Printf("Couldn't request filter from URL %s, skipping: %s", filter.URL, err)
-			return false, err
-		}
-
-		if resp.StatusCode != 200 {
-			log.Printf("Got status code %d from URL %s, skipping", resp.StatusCode, filter.URL)
-			return false, fmt.Errorf("got status code != 200: %d", resp.StatusCode)
-		}
-		reader = resp.Body
-	}
-
+func (f *Filtering) read(reader io.Reader, tmpFile *os.File, filter *filter) (int, error) {
 	htmlTest := true
 	firstChunk := make([]byte, 4*1024)
 	firstChunkLen := 0

@@ -556,12 +518,12 @@ func (f *Filtering) updateIntl(filter *filter) (bool, error) {

 	if firstChunkLen == len(firstChunk) || err == io.EOF {
 		if !isPrintableText(firstChunk, firstChunkLen) {
-			return false, fmt.Errorf("data contains non-printable characters")
+			return total, fmt.Errorf("data contains non-printable characters")
 		}

 		s := strings.ToLower(string(firstChunk))
 		if strings.Contains(s, "<html") || strings.Contains(s, "<!doctype") {
-			return false, fmt.Errorf("data is HTML, not plain text")
+			return total, fmt.Errorf("data is HTML, not plain text")
 		}

 		htmlTest = false

@@ -571,17 +533,70 @@ func (f *Filtering) updateIntl(filter *filter) (bool, error) {

 		_, err2 := tmpFile.Write(buf[:n])
 		if err2 != nil {
-			return false, err2
+			return total, err2
 		}

 		if err == io.EOF {
-			break
+			return total, nil
 		}
 		if err != nil {
 			log.Printf("Couldn't fetch filter contents from URL %s, skipping: %s", filter.URL, err)
-			return false, err
+			return total, err
 		}
 	}
+}
+
+// updateIntl returns true if filter update performed successfully.
+func (f *Filtering) updateIntl(filter *filter) (updated bool, err error) {
+	updated = false
+	log.Tracef("Downloading update for filter %d from %s", filter.ID, filter.URL)
+
+	tmpFile, err := ioutil.TempFile(filepath.Join(Context.getDataDir(), filterDir), "")
+	if err != nil {
+		return updated, err
+	}
+	defer func() {
+		if tmpFile != nil {
+			if err := tmpFile.Close(); err != nil {
+				log.Printf("Couldn't close temporary file: %s", err)
+			}
+			tmpFileName := tmpFile.Name()
+			if err := os.Remove(tmpFileName); err != nil {
+				log.Printf("Couldn't delete temporary file %s: %s", tmpFileName, err)
+			}
+		}
+	}()
+
+	var reader io.Reader
+	if filepath.IsAbs(filter.URL) {
+		f, err := os.Open(filter.URL)
+		if err != nil {
+			return updated, fmt.Errorf("open file: %w", err)
+		}
+		defer f.Close()
+		reader = f
+	} else {
+		resp, err := Context.client.Get(filter.URL)
+		if resp != nil && resp.Body != nil {
+			defer resp.Body.Close()
+		}
+		if err != nil {
+			log.Printf("Couldn't request filter from URL %s, skipping: %s", filter.URL, err)
+			return updated, err
+		}
+
+		if resp.StatusCode != http.StatusOK {
+			log.Printf("Got status code %d from URL %s, skipping", resp.StatusCode, filter.URL)
+			return updated, fmt.Errorf("got status code != 200: %d", resp.StatusCode)
+		}
+		reader = resp.Body
+	}
+
+	total, err := f.read(reader, tmpFile, filter)
+	if err != nil {
+		return updated, err
+	}
+
 	// Extract filter name and count number of rules
 	_, _ = tmpFile.Seek(0, io.SeekStart)

@@ -589,7 +604,7 @@ func (f *Filtering) updateIntl(filter *filter) (bool, error) {
 	// Check if the filter has been really changed
 	if filter.checksum == checksum {
 		log.Tracef("Filter #%d at URL %s hasn't changed, not updating it", filter.ID, filter.URL)
-		return false, nil
+		return updated, nil
 	}

 	log.Printf("Filter %d has been updated: %d bytes, %d rules",

@@ -606,11 +621,12 @@ func (f *Filtering) updateIntl(filter *filter) (bool, error) {
 	_ = tmpFile.Close()
 	err = os.Rename(tmpFile.Name(), filterFilePath)
 	if err != nil {
-		return false, err
+		return updated, err
 	}
 	tmpFile = nil
+	updated = true

-	return true, nil
+	return updated, nil
 }

 // loads filter contents from the file in dataDir
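The split above leaves updateIntl with a clear shape: create a temporary file, hand the download stream to read for validation and copying, and rename the temporary file over the destination only after everything succeeded. A rough, self-contained sketch of that download-validate-rename flow (the URL and file names are placeholders, not AdGuard Home's):

package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "net/http"
    "os"
    "strings"
)

// fetchToFile mirrors the flow: write to a temp file, reject HTML responses,
// and only rename over dst once the whole body has been stored.
func fetchToFile(url, dst string) error {
    resp, err := http.Get(url)
    if err != nil {
        return err
    }
    defer resp.Body.Close()
    if resp.StatusCode != http.StatusOK {
        return fmt.Errorf("got status code != 200: %d", resp.StatusCode)
    }

    tmp, err := ioutil.TempFile(".", "filter")
    if err != nil {
        return err
    }
    defer func() {
        // Best-effort cleanup; after a successful rename the temp file is already gone.
        _ = tmp.Close()
        _ = os.Remove(tmp.Name())
    }()

    // Sniff the first chunk: filter lists are plain text, not HTML.
    first := make([]byte, 4*1024)
    n, err := io.ReadFull(resp.Body, first)
    if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF {
        return err
    }
    if s := strings.ToLower(string(first[:n])); strings.Contains(s, "<html") {
        return fmt.Errorf("data is HTML, not plain text")
    }

    if _, err = tmp.Write(first[:n]); err != nil {
        return err
    }
    if _, err = io.Copy(tmp, resp.Body); err != nil {
        return err
    }
    if err = tmp.Close(); err != nil {
        return err
    }

    // Only replace the old file once the new one is fully written.
    return os.Rename(tmp.Name(), dst)
}

func main() {
    // Hypothetical usage, for illustration only.
    if err := fetchToFile("https://example.org/filter.txt", "filter.txt"); err != nil {
        fmt.Println("update failed:", err)
    }
}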
@@ -65,6 +65,8 @@ type homeContext struct {
 	autoHosts util.AutoHosts // IP-hostname pairs taken from system configuration (e.g. /etc/hosts) files
 	updater   *update.Updater

+	ipDetector *ipDetector
+
 	// Runtime properties
 	// --

@@ -140,28 +142,7 @@ func version() string {
 	return fmt.Sprintf(msg, versionString, updateChannel, runtime.GOOS, runtime.GOARCH)
 }

-// run initializes configuration and runs the AdGuard Home
-// run is a blocking method!
-// nolint
-func run(args options) {
-	// configure config filename
-	initConfigFilename(args)
-
-	// configure working dir and config path
-	initWorkingDir(args)
-
-	// configure log level and output
-	configureLogger(args)
-
-	// Go memory hacks
-	memoryUsage(args)
-
-	// print the first message after logger is configured
-	log.Println(version())
-	log.Debug("Current working directory is %s", Context.workDir)
-	if args.runningAsService {
-		log.Info("AdGuard Home is running as a service")
-	}
+func setupContext(args options) {
 	Context.runningAsService = args.runningAsService
 	Context.disableUpdate = args.disableUpdate

@@ -180,6 +161,7 @@ func run(args options) {
 		Proxy: getHTTPProxy,
 		TLSClientConfig: &tls.Config{
 			RootCAs: Context.tlsRoots,
+			MinVersion: tls.VersionTLS12,
 		},
 	}
 	Context.client = &http.Client{

@@ -205,12 +187,9 @@ func run(args options) {
 			os.Exit(0)
 		}
 	}
+}

-	// 'clients' module uses 'dnsfilter' module's static data (dnsfilter.BlockedSvcKnown()),
-	// so we have to initialize dnsfilter's static data first,
-	// but also avoid relying on automatic Go init() function
-	dnsfilter.InitModule()
-
+func setupConfig(args options) {
 	config.DHCP.WorkDir = Context.workDir
 	config.DHCP.HTTPRegister = httpRegister
 	config.DHCP.ConfigModified = onConfigModified

@@ -251,6 +230,37 @@ func run(args options) {
 	if len(args.pidFile) != 0 && writePIDFile(args.pidFile) {
 		Context.pidFileName = args.pidFile
 	}
+}
+
+// run performs configurating and starts AdGuard Home.
+func run(args options) {
+	// configure config filename
+	initConfigFilename(args)
+
+	// configure working dir and config path
+	initWorkingDir(args)
+
+	// configure log level and output
+	configureLogger(args)
+
+	// Go memory hacks
+	memoryUsage(args)
+
+	// print the first message after logger is configured
+	log.Println(version())
+	log.Debug("Current working directory is %s", Context.workDir)
+	if args.runningAsService {
+		log.Info("AdGuard Home is running as a service")
+	}
+
+	setupContext(args)
+
+	// clients package uses dnsfilter package's static data (dnsfilter.BlockedSvcKnown()),
+	// so we have to initialize dnsfilter's static data first,
+	// but also avoid relying on automatic Go init() function
+	dnsfilter.InitModule()
+
+	setupConfig(args)
+
 	if !Context.firstRun {
 		// Save the updated config

@@ -322,6 +332,11 @@ func run(args options) {
 		}
 	}

+	Context.ipDetector, err = newIPDetector()
+	if err != nil {
+		log.Fatal(err)
+	}
+
 	Context.web.Start()

 	// wait indefinitely for other go-routines to complete their job
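One small hardening change above is the MinVersion field added to the shared HTTP client's TLS configuration. A minimal sketch of an http.Client built the same way (the proxy and root-CA wiring from the diff is omitted, and the timeout is an arbitrary example value):

package main

import (
    "crypto/tls"
    "net/http"
    "time"
)

func main() {
    // Refuse TLS versions below 1.2 for all outgoing requests made with this client.
    client := &http.Client{
        Timeout: 30 * time.Second,
        Transport: &http.Transport{
            TLSClientConfig: &tls.Config{
                MinVersion: tls.VersionTLS12,
            },
        },
    }
    _ = client // use client.Get(...) etc. as usual
}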
internal/home/ipdetector.go Normal file (72 lines)
@@ -0,0 +1,72 @@
+package home
+
+import "net"
+
+// ipDetector describes IP address properties.
+type ipDetector struct {
+	nets []*net.IPNet
+}
+
+// newIPDetector returns a new IP detector.
+func newIPDetector() (ipd *ipDetector, err error) {
+	specialNetworks := []string{
+		"0.0.0.0/8",
+		"10.0.0.0/8",
+		"100.64.0.0/10",
+		"127.0.0.0/8",
+		"169.254.0.0/16",
+		"172.16.0.0/12",
+		"192.0.0.0/24",
+		"192.0.0.0/29",
+		"192.0.2.0/24",
+		"192.88.99.0/24",
+		"192.168.0.0/16",
+		"198.18.0.0/15",
+		"198.51.100.0/24",
+		"203.0.113.0/24",
+		"240.0.0.0/4",
+		"255.255.255.255/32",
+		"::1/128",
+		"::/128",
+		"64:ff9b::/96",
+		// Since this network is used for mapping IPv4 addresses, we
+		// don't include it.
+		// "::ffff:0:0/96",
+		"100::/64",
+		"2001::/23",
+		"2001::/32",
+		"2001:2::/48",
+		"2001:db8::/32",
+		"2001:10::/28",
+		"2002::/16",
+		"fc00::/7",
+		"fe80::/10",
+	}
+
+	ipd = &ipDetector{
+		nets: make([]*net.IPNet, len(specialNetworks)),
+	}
+	for i, ipnetStr := range specialNetworks {
+		_, ipnet, err := net.ParseCIDR(ipnetStr)
+		if err != nil {
+			return nil, err
+		}
+
+		ipd.nets[i] = ipnet
+	}
+
+	return ipd, nil
+}
+
+// detectSpecialNetwork returns true if IP address is contained by any of
+// special-purpose IP address registries according to RFC-6890
+// (https://tools.ietf.org/html/rfc6890).
+func (ipd *ipDetector) detectSpecialNetwork(ip net.IP) bool {
+	for _, ipnet := range ipd.nets {
+		if ipnet.Contains(ip) {
+			return true
+		}
+	}
+
+	return false
+}
internal/home/ipdetector_test.go Normal file (146 lines)
@@ -0,0 +1,146 @@
+package home
+
+import (
+	"net"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestIPDetector_detectSpecialNetwork(t *testing.T) {
+	var ipd *ipDetector
+
+	t.Run("newIPDetector", func(t *testing.T) {
+		var err error
+		ipd, err = newIPDetector()
+		assert.Nil(t, err)
+	})
+
+	testCases := []struct {
+		name string
+		ip   net.IP
+		want bool
+	}{{
+		name: "not_specific",
+		ip:   net.ParseIP("8.8.8.8"),
+		want: false,
+	}, {
+		name: "this_host_on_this_network",
+		ip:   net.ParseIP("0.0.0.0"),
+		want: true,
+	}, {
+		name: "private-Use",
+		ip:   net.ParseIP("10.0.0.0"),
+		want: true,
+	}, {
+		name: "shared_address_space",
+		ip:   net.ParseIP("100.64.0.0"),
+		want: true,
+	}, {
+		name: "loopback",
+		ip:   net.ParseIP("127.0.0.0"),
+		want: true,
+	}, {
+		name: "link_local",
+		ip:   net.ParseIP("169.254.0.0"),
+		want: true,
+	}, {
+		name: "private-use",
+		ip:   net.ParseIP("172.16.0.0"),
+		want: true,
+	}, {
+		name: "ietf_protocol_assignments",
+		ip:   net.ParseIP("192.0.0.0"),
+		want: true,
+	}, {
+		name: "ds-lite",
+		ip:   net.ParseIP("192.0.0.0"),
+		want: true,
+	}, {
+		name: "documentation_(test-net-1)",
+		ip:   net.ParseIP("192.0.2.0"),
+		want: true,
+	}, {
+		name: "6to4_relay_anycast",
+		ip:   net.ParseIP("192.88.99.0"),
+		want: true,
+	}, {
+		name: "private-use",
+		ip:   net.ParseIP("192.168.0.0"),
+		want: true,
+	}, {
+		name: "benchmarking",
+		ip:   net.ParseIP("198.18.0.0"),
+		want: true,
+	}, {
+		name: "documentation_(test-net-2)",
+		ip:   net.ParseIP("198.51.100.0"),
+		want: true,
+	}, {
+		name: "documentation_(test-net-3)",
+		ip:   net.ParseIP("203.0.113.0"),
+		want: true,
+	}, {
+		name: "reserved",
+		ip:   net.ParseIP("240.0.0.0"),
+		want: true,
+	}, {
+		name: "limited_broadcast",
+		ip:   net.ParseIP("255.255.255.255"),
+		want: true,
+	}, {
+		name: "loopback_address",
+		ip:   net.ParseIP("::1"),
+		want: true,
+	}, {
+		name: "unspecified_address",
+		ip:   net.ParseIP("::"),
+		want: true,
+	}, {
+		name: "ipv4-ipv6_translation",
+		ip:   net.ParseIP("64:ff9b::"),
+		want: true,
+	}, {
+		name: "discard-only_address_block",
+		ip:   net.ParseIP("100::"),
+		want: true,
+	}, {
+		name: "ietf_protocol_assignments",
+		ip:   net.ParseIP("2001::"),
+		want: true,
+	}, {
+		name: "teredo",
+		ip:   net.ParseIP("2001::"),
+		want: true,
+	}, {
+		name: "benchmarking",
+		ip:   net.ParseIP("2001:2::"),
+		want: true,
+	}, {
+		name: "documentation",
+		ip:   net.ParseIP("2001:db8::"),
+		want: true,
+	}, {
+		name: "orchid",
+		ip:   net.ParseIP("2001:10::"),
+		want: true,
+	}, {
+		name: "6to4",
+		ip:   net.ParseIP("2002::"),
+		want: true,
+	}, {
+		name: "unique-local",
+		ip:   net.ParseIP("fc00::"),
+		want: true,
+	}, {
+		name: "linked-scoped_unicast",
+		ip:   net.ParseIP("fe80::"),
+		want: true,
+	}}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			assert.Equal(t, tc.want, ipd.detectSpecialNetwork(tc.ip))
+		})
+	}
+}
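Inside the home package the detector is reached as Context.ipDetector, but the mechanism is nothing more than net.ParseCIDR plus IPNet.Contains. A standalone sketch of the same idea with a trimmed-down network list (a few blocks from ipdetector.go; the helper names here are illustrative):

package main

import (
    "fmt"
    "net"
)

func main() {
    // A small subset of the RFC 6890 special-purpose blocks used above.
    special := []string{"10.0.0.0/8", "127.0.0.0/8", "192.168.0.0/16", "fe80::/10"}

    nets := make([]*net.IPNet, 0, len(special))
    for _, s := range special {
        _, ipnet, err := net.ParseCIDR(s)
        if err != nil {
            panic(err) // the list is static, so this only fires on a typo
        }
        nets = append(nets, ipnet)
    }

    contains := func(ip net.IP) bool {
        for _, n := range nets {
            if n.Contains(ip) {
                return true
            }
        }
        return false
    }

    for _, addr := range []string{"8.8.8.8", "192.168.1.10", "fe80::1"} {
        ip := net.ParseIP(addr)
        // dns.go now gates WHOIS lookups on !detectSpecialNetwork(ip) in the same way.
        fmt.Printf("%-14s special=%v\n", addr, contains(ip))
    }
}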
@@ -2,6 +2,7 @@ package querylog

 import (
 	"encoding/base64"
+	"encoding/json"
 	"strconv"
 	"strings"
 	"time"

@@ -11,86 +12,206 @@ import (
 	"github.com/miekg/dns"
 )

-// decodeLogEntry - decodes query log entry from a line
-// nolint (gocyclo)
-func decodeLogEntry(ent *logEntry, str string) {
-	var b bool
-	var i int
-	var err error
-	for {
-		k, v, t := readJSON(&str)
-		if t == jsonTErr {
-			break
-		}
-		switch k {
-		case "IP":
-			if len(ent.IP) == 0 {
-				ent.IP = v
-			}
-		case "T":
-			ent.Time, err = time.Parse(time.RFC3339, v)
-		case "QH":
-			ent.QHost = v
-		case "QT":
-			ent.QType = v
-		case "QC":
-			ent.QClass = v
-		case "CP":
-			ent.ClientProto, err = NewClientProto(v)
-		case "Answer":
-			ent.Answer, err = base64.StdEncoding.DecodeString(v)
-		case "OrigAnswer":
-			ent.OrigAnswer, err = base64.StdEncoding.DecodeString(v)
-		case "IsFiltered":
-			b, err = strconv.ParseBool(v)
-			ent.Result.IsFiltered = b
-		case "Rule":
-			ent.Result.Rule = v
-		case "FilterID":
-			i, err = strconv.Atoi(v)
-			ent.Result.FilterID = int64(i)
-		case "Reason":
-			i, err = strconv.Atoi(v)
-			ent.Result.Reason = dnsfilter.Reason(i)
-		case "ServiceName":
-			ent.Result.ServiceName = v
-		case "Upstream":
-			ent.Upstream = v
-		case "Elapsed":
-			i, err = strconv.Atoi(v)
-			ent.Elapsed = time.Duration(i)
-		// pre-v0.99.3 compatibility:
-		case "Question":
-			var qstr []byte
-			qstr, err = base64.StdEncoding.DecodeString(v)
-			if err != nil {
-				break
-			}
-			q := new(dns.Msg)
-			err = q.Unpack(qstr)
-			if err != nil {
-				break
-			}
-			ent.QHost = q.Question[0].Name
-			if len(ent.QHost) == 0 {
-				break
-			}
-			ent.QHost = ent.QHost[:len(ent.QHost)-1]
-			ent.QType = dns.TypeToString[q.Question[0].Qtype]
-			ent.QClass = dns.ClassToString[q.Question[0].Qclass]
-		case "Time":
-			ent.Time, err = time.Parse(time.RFC3339, v)
-		}
-		if err != nil {
-			log.Debug("decodeLogEntry err: %s", err)
-			break
-		}
-	}
-}
+var logEntryHandlers = map[string](func(t json.Token, ent *logEntry) error){
+	"IP": func(t json.Token, ent *logEntry) error {
+		v, ok := t.(string)
+		if !ok {
+			return nil
+		}
+		if len(ent.IP) == 0 {
+			ent.IP = v
+		}
+		return nil
+	},
+	"T": func(t json.Token, ent *logEntry) error {
+		v, ok := t.(string)
+		if !ok {
+			return nil
+		}
+		var err error
+		ent.Time, err = time.Parse(time.RFC3339, v)
+		return err
+	},
+	"QH": func(t json.Token, ent *logEntry) error {
+		v, ok := t.(string)
+		if !ok {
+			return nil
+		}
+		ent.QHost = v
+		return nil
+	},
+	"QT": func(t json.Token, ent *logEntry) error {
+		v, ok := t.(string)
+		if !ok {
+			return nil
+		}
+		ent.QType = v
+		return nil
+	},
+	"QC": func(t json.Token, ent *logEntry) error {
+		v, ok := t.(string)
+		if !ok {
+			return nil
+		}
+		ent.QClass = v
+		return nil
+	},
+	"CP": func(t json.Token, ent *logEntry) error {
+		v, ok := t.(string)
+		if !ok {
+			return nil
+		}
+		var err error
+		ent.ClientProto, err = NewClientProto(v)
+		return err
+	},
+	"Answer": func(t json.Token, ent *logEntry) error {
+		v, ok := t.(string)
+		if !ok {
+			return nil
+		}
+		var err error
+		ent.Answer, err = base64.StdEncoding.DecodeString(v)
+		return err
+	},
+	"OrigAnswer": func(t json.Token, ent *logEntry) error {
+		v, ok := t.(string)
+		if !ok {
+			return nil
+		}
+		var err error
+		ent.OrigAnswer, err = base64.StdEncoding.DecodeString(v)
+		return err
+	},
+	"IsFiltered": func(t json.Token, ent *logEntry) error {
+		v, ok := t.(string)
+		if !ok {
+			return nil
+		}
+		b, err := strconv.ParseBool(v)
+		if err != nil {
+			return err
+		}
+		ent.Result.IsFiltered = b
+		return nil
+	},
+	"Rule": func(t json.Token, ent *logEntry) error {
+		v, ok := t.(string)
+		if !ok {
+			return nil
+		}
+		ent.Result.Rule = v
+		return nil
+	},
+	"FilterID": func(t json.Token, ent *logEntry) error {
+		v, ok := t.(string)
+		if !ok {
+			return nil
+		}
+		i, err := strconv.Atoi(v)
+		if err != nil {
+			return err
+		}
+		ent.Result.FilterID = int64(i)
+		return nil
+	},
+	"Reason": func(t json.Token, ent *logEntry) error {
+		v, ok := t.(string)
+		if !ok {
+			return nil
+		}
+		i, err := strconv.Atoi(v)
+		if err != nil {
+			return err
+		}
+		ent.Result.Reason = dnsfilter.Reason(i)
+		return nil
+	},
+	"ServiceName": func(t json.Token, ent *logEntry) error {
+		v, ok := t.(string)
+		if !ok {
+			return nil
+		}
+		ent.Result.ServiceName = v
+		return nil
+	},
+	"Upstream": func(t json.Token, ent *logEntry) error {
+		v, ok := t.(string)
+		if !ok {
+			return nil
+		}
+		ent.Upstream = v
+		return nil
+	},
+	"Elapsed": func(t json.Token, ent *logEntry) error {
+		v, ok := t.(string)
+		if !ok {
+			return nil
+		}
+		i, err := strconv.Atoi(v)
+		if err != nil {
+			return err
+		}
+		ent.Elapsed = time.Duration(i)
+		return nil
+	},
+	"Question": func(t json.Token, ent *logEntry) error {
+		v, ok := t.(string)
+		if !ok {
+			return nil
+		}
+		var qstr []byte
+		qstr, err := base64.StdEncoding.DecodeString(v)
+		if err != nil {
+			return err
+		}
+		q := new(dns.Msg)
+		err = q.Unpack(qstr)
+		if err != nil {
+			return err
+		}
+		ent.QHost = q.Question[0].Name
+		if len(ent.QHost) == 0 {
+			return nil // nil???
+		}
+		ent.QHost = ent.QHost[:len(ent.QHost)-1]
+		ent.QType = dns.TypeToString[q.Question[0].Qtype]
+		ent.QClass = dns.ClassToString[q.Question[0].Qclass]
+		return nil
+	},
+	"Time": func(t json.Token, ent *logEntry) error {
+		v, ok := t.(string)
+		if !ok {
+			return nil
+		}
+		var err error
+		ent.Time, err = time.Parse(time.RFC3339, v)
+		return err
+	},
+}
+
+func decodeLogEntry(ent *logEntry, str string) {
+	dec := json.NewDecoder(strings.NewReader(str))
+	for dec.More() {
+		keyToken, err := dec.Token()
+		if err != nil {
+			log.Debug("decodeLogEntry err: %s", err)
+			return
+		}
+		if _, ok := keyToken.(json.Delim); ok {
+			continue
+		}
+		key := keyToken.(string)
+		handler, ok := logEntryHandlers[key]
+		value, err := dec.Token()
+		if err != nil {
+			return
+		}
+		if ok {
+			if err := handler(value, ent); err != nil {
+				log.Debug("decodeLogEntry err: %s", err)
+				return
+			}
+		}
+	}
+}
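The decoder rewrite above drops the hand-rolled readJSON parser in favour of encoding/json's streaming tokenizer plus a per-key handler map. A self-contained sketch of that token-walking pattern (the entry type and keys here are simplified stand-ins for logEntry, not the package's real types):

package main

import (
    "encoding/json"
    "fmt"
    "strings"
)

type entry struct {
    IP    string
    QHost string
}

// handlers maps a JSON key to a function that stores its value,
// mirroring the role of logEntryHandlers.
var handlers = map[string]func(t json.Token, ent *entry) error{
    "IP": func(t json.Token, ent *entry) error {
        if v, ok := t.(string); ok {
            ent.IP = v
        }
        return nil
    },
    "QH": func(t json.Token, ent *entry) error {
        if v, ok := t.(string); ok {
            ent.QHost = v
        }
        return nil
    },
}

func decode(ent *entry, s string) {
    dec := json.NewDecoder(strings.NewReader(s))
    for dec.More() {
        keyToken, err := dec.Token()
        if err != nil {
            return
        }
        // Skip '{' and '}' delimiters; only key/value pairs matter here.
        if _, ok := keyToken.(json.Delim); ok {
            continue
        }
        key, _ := keyToken.(string)
        value, err := dec.Token()
        if err != nil {
            return
        }
        if h, ok := handlers[key]; ok {
            _ = h(value, ent)
        }
    }
}

func main() {
    e := &entry{}
    decode(e, `{"IP":"127.0.0.1","QH":"example.org","Unknown":42}`)
    fmt.Printf("%+v\n", *e) // unknown keys are simply skipped
}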
@@ -109,72 +230,3 @@ func readJSONValue(s, name string) string {
 	end := start + i
 	return s[start:end]
 }
-
-const (
-	jsonTErr = iota
-	jsonTObj
-	jsonTStr
-	jsonTNum
-	jsonTBool
-)
-
-// Parse JSON key-value pair
-// e.g.: "key":VALUE where VALUE is "string", true|false (boolean), or 123.456 (number)
-// Note the limitations:
-// . doesn't support whitespace
-// . doesn't support "null"
-// . doesn't validate boolean or number
-// . no proper handling of {} braces
-// . no handling of [] brackets
-// Return (key, value, type)
-func readJSON(ps *string) (string, string, int32) {
-	s := *ps
-	k := ""
-	v := ""
-	t := int32(jsonTErr)
-
-	q1 := strings.IndexByte(s, '"')
-	if q1 == -1 {
-		return k, v, t
-	}
-	q2 := strings.IndexByte(s[q1+1:], '"')
-	if q2 == -1 {
-		return k, v, t
-	}
-	k = s[q1+1 : q1+1+q2]
-	s = s[q1+1+q2+1:]
-
-	if len(s) < 2 || s[0] != ':' {
-		return k, v, t
-	}
-
-	if s[1] == '"' {
-		q2 = strings.IndexByte(s[2:], '"')
-		if q2 == -1 {
-			return k, v, t
-		}
-		v = s[2 : 2+q2]
-		t = jsonTStr
-		s = s[2+q2+1:]
-	} else if s[1] == '{' {
-		t = jsonTObj
-		s = s[1+1:]
-	} else {
-		sep := strings.IndexAny(s[1:], ",}")
-		if sep == -1 {
-			return k, v, t
-		}
-		v = s[1 : 1+sep]
-		if s[1] == 't' || s[1] == 'f' {
-			t = jsonTBool
-		} else if s[1] == '.' || (s[1] >= '0' && s[1] <= '9') {
-			t = jsonTNum
-		}
-		s = s[1+sep+1:]
-	}
-
-	*ps = s
-	return k, v, t
-}
@@ -48,30 +48,3 @@ func TestDecode_decodeQueryLog(t *testing.T) {
 		})
 	}
 }
-
-func TestJSON(t *testing.T) {
-	s := `
-	{"keystr":"val","obj":{"keybool":true,"keyint":123456}}
-	`
-	k, v, jtype := readJSON(&s)
-	assert.Equal(t, jtype, int32(jsonTStr))
-	assert.Equal(t, "keystr", k)
-	assert.Equal(t, "val", v)
-
-	k, _, jtype = readJSON(&s)
-	assert.Equal(t, jtype, int32(jsonTObj))
-	assert.Equal(t, "obj", k)
-
-	k, v, jtype = readJSON(&s)
-	assert.Equal(t, jtype, int32(jsonTBool))
-	assert.Equal(t, "keybool", k)
-	assert.Equal(t, "true", v)
-
-	k, v, jtype = readJSON(&s)
-	assert.Equal(t, jtype, int32(jsonTNum))
-	assert.Equal(t, "keyint", k)
-	assert.Equal(t, "123456", v)
-
-	_, _, jtype = readJSON(&s)
-	assert.True(t, jtype == jsonTErr)
-}
@@ -77,10 +77,18 @@ func (c *searchCriteria) quickMatchJSONValue(line string, propertyName string) bool {
 }

 // match - checks if the log entry matches this search criteria
-// nolint (gocyclo)
 func (c *searchCriteria) match(entry *logEntry) bool {
 	switch c.criteriaType {
 	case ctDomainOrClient:
+		return c.ctDomainOrClientCase(entry)
+	case ctFilteringStatus:
+		return c.ctFilteringStatusCase(entry.Result)
+	}
+
+	return false
+}
+
+func (c *searchCriteria) ctDomainOrClientCase(entry *logEntry) bool {
 	qhost := strings.ToLower(entry.QHost)
 	searchVal := strings.ToLower(c.value)
 	if c.strict && qhost == searchVal {

@@ -96,47 +104,52 @@ func (c *searchCriteria) match(entry *logEntry) bool {
 	if !c.strict && strings.Contains(entry.IP, c.value) {
 		return true
 	}

 	return false
+}

-	case ctFilteringStatus:
-		res := entry.Result
+func (c *searchCriteria) ctFilteringStatusCase(res dnsfilter.Result) bool {
 	switch c.value {
 	case filteringStatusAll:
 		return true

 	case filteringStatusFiltered:
 		return res.IsFiltered ||
-			res.Reason == dnsfilter.NotFilteredWhiteList ||
-			res.Reason == dnsfilter.ReasonRewrite ||
-			res.Reason == dnsfilter.RewriteEtcHosts
+			res.Reason.In(
+				dnsfilter.NotFilteredWhiteList,
+				dnsfilter.ReasonRewrite,
+				dnsfilter.RewriteEtcHosts,
+			)

 	case filteringStatusBlocked:
 		return res.IsFiltered &&
-			(res.Reason == dnsfilter.FilteredBlackList ||
-				res.Reason == dnsfilter.FilteredBlockedService)
+			res.Reason.In(dnsfilter.FilteredBlackList, dnsfilter.FilteredBlockedService)

 	case filteringStatusBlockedService:
 		return res.IsFiltered && res.Reason == dnsfilter.FilteredBlockedService

 	case filteringStatusBlockedParental:
 		return res.IsFiltered && res.Reason == dnsfilter.FilteredParental

 	case filteringStatusBlockedSafebrowsing:
 		return res.IsFiltered && res.Reason == dnsfilter.FilteredSafeBrowsing

 	case filteringStatusWhitelisted:
 		return res.Reason == dnsfilter.NotFilteredWhiteList

 	case filteringStatusRewritten:
-		return res.Reason == dnsfilter.ReasonRewrite ||
-			res.Reason == dnsfilter.RewriteEtcHosts
+		return res.Reason.In(dnsfilter.ReasonRewrite, dnsfilter.RewriteEtcHosts)

 	case filteringStatusSafeSearch:
 		return res.IsFiltered && res.Reason == dnsfilter.FilteredSafeSearch

 	case filteringStatusProcessed:
-		return !(res.Reason == dnsfilter.FilteredBlackList ||
-			res.Reason == dnsfilter.FilteredBlockedService ||
-			res.Reason == dnsfilter.NotFilteredWhiteList)
+		return !res.Reason.In(
+			dnsfilter.FilteredBlackList,
+			dnsfilter.FilteredBlockedService,
+			dnsfilter.NotFilteredWhiteList,
+		)

 	default:
 		return false
 	}
-	}
-
-	return false
 }
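The search-criteria rewrite above relies on a Reason.In helper to replace chains of equality checks. Its actual definition lives in the dnsfilter package; based on how it is called in the diff, it is presumably a variadic membership test along these lines (an assumed sketch, not the package's real code):

package main

import "fmt"

// Reason mimics dnsfilter.Reason for illustration only.
type Reason int

const (
    NotFilteredWhiteList Reason = iota
    ReasonRewrite
    RewriteEtcHosts
    FilteredBlackList
)

// In reports whether r equals one of the listed reasons; this is the shape
// the call sites res.Reason.In(...) suggest.
func (r Reason) In(reasons ...Reason) bool {
    for _, other := range reasons {
        if r == other {
            return true
        }
    }
    return false
}

func main() {
    r := ReasonRewrite
    fmt.Println(r.In(ReasonRewrite, RewriteEtcHosts)) // true
    fmt.Println(r.In(FilteredBlackList))              // false
}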
@@ -548,7 +548,6 @@ func (s *statsCtx) loadUnits(limit uint32) ([]*unitDB, uint32) {
 * parental-blocked
 These values are just the sum of data for all units.
 */
-// nolint (gocyclo)
 func (s *statsCtx) getData() map[string]interface{} {
 	limit := s.conf.limit

@@ -564,137 +563,63 @@ func (s *statsCtx) getData() map[string]interface{} {
 	}

 	// per time unit counters:

 	// 720 hours may span 31 days, so we skip data for the first day in this case
 	firstDayID := (firstID + 24 - 1) / 24 * 24 // align_ceil(24)

-	a := []uint64{}
-	if timeUnit == Hours {
-		for _, u := range units {
-			a = append(a, u.NTotal)
-		}
-	} else {
-		var sum uint64
-		id := firstDayID
-		nextDayID := firstDayID + 24
-		for i := firstDayID - firstID; int(i) != len(units); i++ {
-			sum += units[i].NTotal
-			if id == nextDayID {
-				a = append(a, sum)
-				sum = 0
-				nextDayID += 24
-			}
-			id++
-		}
-		if id <= nextDayID {
-			a = append(a, sum)
-		}
-		if len(a) != int(limit/24) {
-			log.Fatalf("len(a) != limit: %d %d", len(a), limit)
-		}
-	}
-	d["dns_queries"] = a
-
-	a = []uint64{}
-	if timeUnit == Hours {
-		for _, u := range units {
-			a = append(a, u.NResult[RFiltered])
-		}
-	} else {
-		var sum uint64
-		id := firstDayID
-		nextDayID := firstDayID + 24
-		for i := firstDayID - firstID; int(i) != len(units); i++ {
-			sum += units[i].NResult[RFiltered]
-			if id == nextDayID {
-				a = append(a, sum)
-				sum = 0
-				nextDayID += 24
-			}
-			id++
-		}
-		if id <= nextDayID {
-			a = append(a, sum)
-		}
-	}
-	d["blocked_filtering"] = a
-
-	a = []uint64{}
-	if timeUnit == Hours {
-		for _, u := range units {
-			a = append(a, u.NResult[RSafeBrowsing])
-		}
-	} else {
-		var sum uint64
-		id := firstDayID
-		nextDayID := firstDayID + 24
-		for i := firstDayID - firstID; int(i) != len(units); i++ {
-			sum += units[i].NResult[RSafeBrowsing]
-			if id == nextDayID {
-				a = append(a, sum)
-				sum = 0
-				nextDayID += 24
-			}
-			id++
-		}
-		if id <= nextDayID {
-			a = append(a, sum)
-		}
-	}
-	d["replaced_safebrowsing"] = a
-
-	a = []uint64{}
-	if timeUnit == Hours {
-		for _, u := range units {
-			a = append(a, u.NResult[RParental])
-		}
-	} else {
-		var sum uint64
-		id := firstDayID
-		nextDayID := firstDayID + 24
-		for i := firstDayID - firstID; int(i) != len(units); i++ {
-			sum += units[i].NResult[RParental]
-			if id == nextDayID {
-				a = append(a, sum)
-				sum = 0
-				nextDayID += 24
-			}
-			id++
-		}
-		if id <= nextDayID {
-			a = append(a, sum)
-		}
-	}
-	d["replaced_parental"] = a
-
-	// top counters:
-
-	m := map[string]uint64{}
-	for _, u := range units {
-		for _, it := range u.Domains {
-			m[it.Name] += it.Count
-		}
-	}
-	a2 := convertMapToArray(m, maxDomains)
-	d["top_queried_domains"] = convertTopArray(a2)
-
-	m = map[string]uint64{}
-	for _, u := range units {
-		for _, it := range u.BlockedDomains {
-			m[it.Name] += it.Count
-		}
-	}
-	a2 = convertMapToArray(m, maxDomains)
-	d["top_blocked_domains"] = convertTopArray(a2)
-
-	m = map[string]uint64{}
-	for _, u := range units {
-		for _, it := range u.Clients {
-			m[it.Name] += it.Count
-		}
-	}
-	a2 = convertMapToArray(m, maxClients)
-	d["top_clients"] = convertTopArray(a2)
+	statsCollector := func(numsGetter func(u *unitDB) (num uint64)) (nums []uint64) {
+		if timeUnit == Hours {
+			for _, u := range units {
+				nums = append(nums, numsGetter(u))
+			}
+		} else {
+			var sum uint64
+			id := firstDayID
+			nextDayID := firstDayID + 24
+			for i := int(firstDayID - firstID); i != len(units); i++ {
+				sum += numsGetter(units[i])
+				if id == nextDayID {
+					nums = append(nums, sum)
+					sum = 0
+					nextDayID += 24
+				}
+				id++
+			}
+			if id <= nextDayID {
+				nums = append(nums, sum)
+			}
+		}
+		return nums
+	}
+
+	topsCollector := func(max int, pairsGetter func(u *unitDB) (pairs []countPair)) []map[string]uint64 {
+		m := map[string]uint64{}
+		for _, u := range units {
+			for _, it := range pairsGetter(u) {
+				m[it.Name] += it.Count
+			}
+		}
+		a2 := convertMapToArray(m, max)
+		return convertTopArray(a2)
+	}
+
+	dnsQueries := statsCollector(func(u *unitDB) (num uint64) { return u.NTotal })
+	if timeUnit != Hours && len(dnsQueries) != int(limit/24) {
+		log.Fatalf("len(dnsQueries) != limit: %d %d", len(dnsQueries), limit)
+	}
+
+	statsData := map[string]interface{}{
+		"dns_queries":           dnsQueries,
+		"blocked_filtering":     statsCollector(func(u *unitDB) (num uint64) { return u.NResult[RFiltered] }),
+		"replaced_safebrowsing": statsCollector(func(u *unitDB) (num uint64) { return u.NResult[RSafeBrowsing] }),
+		"replaced_parental":     statsCollector(func(u *unitDB) (num uint64) { return u.NResult[RParental] }),
+		"top_queried_domains":   topsCollector(maxDomains, func(u *unitDB) (pairs []countPair) { return u.Domains }),
+		"top_blocked_domains":   topsCollector(maxDomains, func(u *unitDB) (pairs []countPair) { return u.BlockedDomains }),
+		"top_clients":           topsCollector(maxClients, func(u *unitDB) (pairs []countPair) { return u.Clients }),
+	}
+
+	for dataKey, dataValue := range statsData {
+		d[dataKey] = dataValue
+	}

 	// total counters:
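The getData rewrite collapses four nearly identical aggregation loops into a single statsCollector closure that takes a per-unit getter, plus a topsCollector for the top-N maps. A stripped-down, self-contained sketch of that pattern over hourly units (the counts are made up for the example):

package main

import "fmt"

type unitDB struct {
    NTotal   uint64
    NBlocked uint64
}

func main() {
    // 48 hourly units -> 2 daily sums when grouping by day.
    units := make([]*unitDB, 48)
    for i := range units {
        units[i] = &unitDB{NTotal: 10, NBlocked: 2}
    }

    // collect sums a single counter across the units in 24-hour buckets,
    // the same role statsCollector plays in getData.
    collect := func(get func(u *unitDB) uint64) (nums []uint64) {
        var sum uint64
        for i, u := range units {
            sum += get(u)
            if (i+1)%24 == 0 {
                nums = append(nums, sum)
                sum = 0
            }
        }
        return nums
    }

    fmt.Println(collect(func(u *unitDB) uint64 { return u.NTotal }))   // [240 240]
    fmt.Println(collect(func(u *unitDB) uint64 { return u.NBlocked })) // [48 48]
}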