Mirror of https://github.com/AdguardTeam/AdGuardHome.git, synced 2024-11-21 12:35:33 +03:00
Pull request: 2271 handle nolint
Merge in DNS/adguard-home from 2271-handle-nolint to master

Closes #2271.

Squashed commit of the following (all commits by Eugene Burkov <e.burkov@adguard.com>):

commit fde5c8795ac79e1f7d02ba8c8e369b5a724a000e (Merge: fc2acd898 642dcd647), Fri Nov 20 17:12:28 2020 +0300
    Merge branch 'master' into 2271-handle-nolint
commit fc2acd89871de08c39e80ace9e5bb8a7acb7afba, Tue Nov 17 11:55:29 2020 +0300
    dnsforward: fix test output strings
commit c4ebae6ea9c293bad239519c44ca5a6c576bb921, Mon Nov 16 22:43:20 2020 +0300
    dnsfilter: make package pass tests
commit f2d98c6acabd8977f3b1b361987eaa31eb6eb9ad, Mon Nov 16 20:05:00 2020 +0300
    querylog: make decoding pass tests
commit ab5850d24c50d53b8393f2de448cc340241351d7 (Merge: 6ed2066bf 8a9c6e8a0), Mon Nov 16 19:48:31 2020 +0300
    Merge branch 'master' into 2271-handle-nolint
commit 6ed2066bf567e13dd14cfa16fc7b109b59fa39ef, Mon Nov 16 18:13:45 2020 +0300
    home: fix tests naming
commit af691081fb02b7500a746b16492f01f7f9befe9a, Mon Nov 16 12:15:49 2020 +0300
    home: improve code quality
commit 2914cd3cd23ef2a1964116baab9187d89b377f86, Wed Nov 11 15:46:39 2020 +0300
    * querylog: remove useless check
commit 9996840650e784ccc76d1f29964560435ba27dc7, Wed Nov 11 13:18:34 2020 +0300
    * all: fix noticed defects
commit 2b15293e59337f70302fbc0db81ebb26bee0bed2, Tue Nov 10 20:15:53 2020 +0300
    * stats: remove last nolint directive
commit b2e1ddf7b58196a2fdbf879f084edb41ca1aa1eb, Tue Nov 10 18:35:41 2020 +0300
    * all: remove another nolint directive
commit c6fc5cfcc9c95ab9e570a95ab41c3e5c0125e62e, Tue Nov 10 18:11:28 2020 +0300
    * querylog: remove nolint directive
commit 226ddbf2c92f737f085b44a4ddf6daec7b602153, Tue Nov 10 16:35:26 2020 +0300
    * home: remove nolint directive
commit 2ea3086ad41e9003282add7e996ae722d72d878b, Tue Nov 10 16:13:57 2020 +0300
    * home: reduce cyclomatic complexity of run function
commit f479b480c48e0bb832ddef8f57586f56b8a55bab, Tue Nov 10 15:35:46 2020 +0300
    * home: use crypto/rand instead of math/rand
commit a28d4a53e3b930136b036606fc7e78404f1d208b, Tue Nov 10 14:11:07 2020 +0300
    * dnsforward: remove gocyclo nolint directive
commit 64a0a324cc2b20614ceec3ccc6505e960fe526e9, Tue Nov 10 11:45:49 2020 +0300
    all *: remove some nolint directives

Updates #2271.
This commit is contained in:
parent: 642dcd647c
commit: 3045da1742
17 changed files with 1053 additions and 832 deletions
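Note: a nolint directive is an inline comment that tells golangci-lint to skip a named check for one line or declaration. This pull request removes most bare directives and keeps only the ones that carry a written justification. A minimal, hypothetical Go sketch of the kept style (the function is illustrative and not part of the diff):

    package example

    import "math/rand"

    // exampleSalt shows the justified form of a nolint directive: the silenced
    // check is named explicitly and the reason is documented next to it.
    func exampleSalt() uint32 {
        // The value is used as a non-secret salt, so math/rand is acceptable here.
        return rand.Uint32() //nolint:gosec
    }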
@@ -39,7 +39,6 @@ linters:
    - govet
    - ineffassign
    - staticcheck
    - structcheck
    - unused
    - varcheck
    - bodyclose

@@ -177,6 +177,16 @@ func (r Reason) String() string {
	return reasonNames[r]
}

// In returns true if reasons include r.
func (r Reason) In(reasons ...Reason) bool {
	for _, reason := range reasons {
		if r == reason {
			return true
		}
	}
	return false
}

// GetConfig - get configuration
func (d *Dnsfilter) GetConfig() RequestFilteringSettings {
	c := RequestFilteringSettings{}

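The hunk above adds the Reason.In helper, which replaces chains of equality comparisons. A small usage sketch, assuming the FilteredSafeBrowsing and FilteredParental constants that appear later in this diff (the wrapper function itself is hypothetical):

    // isSecurityBlock reports whether a result came from one of the
    // browsing-security services.
    func isSecurityBlock(r Reason) bool {
        return r.In(FilteredSafeBrowsing, FilteredParental)
    }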
@@ -71,31 +71,35 @@ func (c *sbCtx) setCache(prefix, hashes []byte) {
	log.Debug("%s: stored in cache: %v", c.svc, prefix)
}

// findInHash returns 32-byte hash if it's found in hashToHost.
func (c *sbCtx) findInHash(val []byte) (hash32 [32]byte, found bool) {
	for i := 4; i < len(val); i += 32 {
		hash := val[i : i+32]

		copy(hash32[:], hash[0:32])

		_, found = c.hashToHost[hash32]
		if found {
			return hash32, found
		}
	}

	return [32]byte{}, false
}

func (c *sbCtx) getCached() int {
	now := time.Now().Unix()
	hashesToRequest := map[[32]byte]string{}
	for k, v := range c.hashToHost {
		key := k[0:2]
		val := c.cache.Get(key)
		if val != nil {
			expire := binary.BigEndian.Uint32(val)
			if now >= int64(expire) {
				val = nil
			} else {
				for i := 4; i < len(val); i += 32 {
					hash := val[i : i+32]
					var hash32 [32]byte
					copy(hash32[:], hash[0:32])
					_, found := c.hashToHost[hash32]
					if found {
						log.Debug("%s: found in cache: %s: blocked by %v", c.svc, c.host, hash32)
						return 1
					}
				}
			}
		}
		if val == nil {
		if val == nil || now >= int64(binary.BigEndian.Uint32(val)) {
			hashesToRequest[k] = v
			continue
		}
		if hash32, found := c.findInHash(val); found {
			log.Debug("%s: found in cache: %s: blocked by %v", c.svc, c.host, hash32)
			return 1
		}
	}

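The loops above imply the cache value layout used by getCached and the new findInHash helper: the first 4 bytes hold a big-endian expiration timestamp and the remainder is a flat run of 32-byte hashes. A standalone sketch of that layout, using only the standard library (packEntry is an illustrative name, not repository code):

    package main

    import (
        "crypto/sha256"
        "encoding/binary"
        "fmt"
        "time"
    )

    // packEntry builds a value in the shape the cache readers expect:
    // [4-byte big-endian expiry][32-byte hash][32-byte hash]...
    func packEntry(ttl time.Duration, hashes [][32]byte) []byte {
        val := make([]byte, 4, 4+32*len(hashes))
        binary.BigEndian.PutUint32(val, uint32(time.Now().Add(ttl).Unix()))
        for _, h := range hashes {
            val = append(val, h[:]...)
        }
        return val
    }

    func main() {
        h := sha256.Sum256([]byte("example.org"))
        val := packEntry(time.Hour, [][32]byte{h})
        // Reading it back mirrors the loop in findInHash.
        fmt.Println(binary.BigEndian.Uint32(val), len(val[4:])/32)
    }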
@@ -254,106 +258,71 @@ func (c *sbCtx) storeCache(hashes [][]byte) {
	}
}

// Disabling "dupl": the algorithm of SB/PC is similar, but it uses different data
// nolint:dupl
func check(c *sbCtx, r Result, u upstream.Upstream) (Result, error) {
	c.hashToHost = hostnameToHashes(c.host)
	switch c.getCached() {
	case -1:
		return Result{}, nil
	case 1:
		return r, nil
	}

	question := c.getQuestion()

	log.Tracef("%s: checking %s: %s", c.svc, c.host, question)
	req := (&dns.Msg{}).SetQuestion(question, dns.TypeTXT)

	resp, err := u.Exchange(req)
	if err != nil {
		return Result{}, err
	}

	matched, receivedHashes := c.processTXT(resp)

	c.storeCache(receivedHashes)
	if matched {
		return r, nil
	}

	return Result{}, nil
}

func (d *Dnsfilter) checkSafeBrowsing(host string) (Result, error) {
	if log.GetLevel() >= log.DEBUG {
		timer := log.StartTimer()
		defer timer.LogElapsed("SafeBrowsing lookup for %s", host)
	}

	result := Result{}
	hashes := hostnameToHashes(host)

	c := &sbCtx{
		host: host,
		svc: "SafeBrowsing",
		hashToHost: hashes,
		cache: gctx.safebrowsingCache,
		cacheTime: d.Config.CacheTime,
	ctx := &sbCtx{
		host: host,
		svc: "SafeBrowsing",
		cache: gctx.safebrowsingCache,
		cacheTime: d.Config.CacheTime,
	}

	// check cache
	match := c.getCached()
	if match < 0 {
		return result, nil
	} else if match > 0 {
		result.IsFiltered = true
		result.Reason = FilteredSafeBrowsing
		result.Rule = "adguard-malware-shavar"
		return result, nil
	res := Result{
		IsFiltered: true,
		Reason: FilteredSafeBrowsing,
		Rule: "adguard-malware-shavar",
	}

	question := c.getQuestion()
	log.Tracef("SafeBrowsing: checking %s: %s", host, question)

	req := dns.Msg{}
	req.SetQuestion(question, dns.TypeTXT)
	resp, err := d.safeBrowsingUpstream.Exchange(&req)
	if err != nil {
		return result, err
	}

	matched, receivedHashes := c.processTXT(resp)
	if matched {
		result.IsFiltered = true
		result.Reason = FilteredSafeBrowsing
		result.Rule = "adguard-malware-shavar"
	}
	c.storeCache(receivedHashes)

	return result, nil
	return check(ctx, res, d.safeBrowsingUpstream)
}

// Disabling "dupl": the algorithm of SB/PC is similar, but it uses different data
// nolint:dupl
func (d *Dnsfilter) checkParental(host string) (Result, error) {
	if log.GetLevel() >= log.DEBUG {
		timer := log.StartTimer()
		defer timer.LogElapsed("Parental lookup for %s", host)
	}

	result := Result{}
	hashes := hostnameToHashes(host)

	c := &sbCtx{
		host: host,
		svc: "Parental",
		hashToHost: hashes,
		cache: gctx.parentalCache,
		cacheTime: d.Config.CacheTime,
	ctx := &sbCtx{
		host: host,
		svc: "Parental",
		cache: gctx.parentalCache,
		cacheTime: d.Config.CacheTime,
	}

	// check cache
	match := c.getCached()
	if match < 0 {
		return result, nil
	} else if match > 0 {
		result.IsFiltered = true
		result.Reason = FilteredParental
		result.Rule = "parental CATEGORY_BLACKLISTED"
		return result, nil
	res := Result{
		IsFiltered: true,
		Reason: FilteredParental,
		Rule: "parental CATEGORY_BLACKLISTED",
	}

	question := c.getQuestion()
	log.Tracef("Parental: checking %s: %s", host, question)

	req := dns.Msg{}
	req.SetQuestion(question, dns.TypeTXT)
	resp, err := d.parentalUpstream.Exchange(&req)
	if err != nil {
		return result, err
	}

	matched, receivedHashes := c.processTXT(resp)
	if matched {
		result.IsFiltered = true
		result.Reason = FilteredParental
		result.Rule = "parental CATEGORY_BLACKLISTED"
	}
	c.storeCache(receivedHashes)

	return result, err
	return check(ctx, res, d.parentalUpstream)
}

func httpError(r *http.Request, w http.ResponseWriter, code int, format string, args ...interface{}) {

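After this refactoring, checkSafeBrowsing and checkParental only build an sbCtx and a prefilled Result and delegate to the shared check function, which performs the cache lookup, the TXT query, and the cache store. A hypothetical call-site sketch of the resulting behavior (the host name is arbitrary):

    res, err := d.checkSafeBrowsing("example.org")
    if err != nil {
        return err
    }
    if res.IsFiltered {
        // res.Reason is FilteredSafeBrowsing and res.Rule is
        // "adguard-malware-shavar": the prefilled Result passed to check().
    }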
@@ -94,19 +94,24 @@ type FilteringConfig struct {
type TLSConfig struct {
	TLSListenAddr *net.TCPAddr `yaml:"-" json:"-"`
	QUICListenAddr *net.UDPAddr `yaml:"-" json:"-"`
	StrictSNICheck bool `yaml:"strict_sni_check" json:"-"` // Reject connection if the client uses server name (in SNI) that doesn't match the certificate

	CertificateChain string `yaml:"certificate_chain" json:"certificate_chain"` // PEM-encoded certificates chain
	PrivateKey string `yaml:"private_key" json:"private_key"` // PEM-encoded private key
	// Reject connection if the client uses server name (in SNI) that doesn't match the certificate
	StrictSNICheck bool `yaml:"strict_sni_check" json:"-"`

	CertificatePath string `yaml:"certificate_path" json:"certificate_path"` // certificate file name
	PrivateKeyPath string `yaml:"private_key_path" json:"private_key_path"` // private key file name
	// PEM-encoded certificates chain
	CertificateChain string `yaml:"certificate_chain" json:"certificate_chain"`
	// PEM-encoded private key
	PrivateKey string `yaml:"private_key" json:"private_key"`

	CertificatePath string `yaml:"certificate_path" json:"certificate_path"`
	PrivateKeyPath string `yaml:"private_key_path" json:"private_key_path"`

	CertificateChainData []byte `yaml:"-" json:"-"`
	PrivateKeyData []byte `yaml:"-" json:"-"`

	cert tls.Certificate // nolint(structcheck) - linter thinks that this field is unused, while TLSConfig is directly included into ServerConfig
	dnsNames []string // nolint(structcheck) // DNS names from certificate (SAN) or CN value from Subject
	cert tls.Certificate
	// DNS names from certificate (SAN) or CN value from Subject
	dnsNames []string
}

// ServerConfig represents server configuration.

@@ -9,7 +9,6 @@ import (
	"strings"

	"github.com/AdguardTeam/dnsproxy/upstream"
	"github.com/AdguardTeam/golibs/jsonutil"
	"github.com/AdguardTeam/golibs/log"
	"github.com/AdguardTeam/golibs/utils"
	"github.com/miekg/dns"

@ -21,232 +20,292 @@ func httpError(r *http.Request, w http.ResponseWriter, code int, format string,
|
|||
http.Error(w, text, code)
|
||||
}
|
||||
|
||||
type dnsConfigJSON struct {
|
||||
Upstreams []string `json:"upstream_dns"`
|
||||
UpstreamsFile string `json:"upstream_dns_file"`
|
||||
Bootstraps []string `json:"bootstrap_dns"`
|
||||
type dnsConfig struct {
|
||||
Upstreams *[]string `json:"upstream_dns"`
|
||||
UpstreamsFile *string `json:"upstream_dns_file"`
|
||||
Bootstraps *[]string `json:"bootstrap_dns"`
|
||||
|
||||
ProtectionEnabled bool `json:"protection_enabled"`
|
||||
RateLimit uint32 `json:"ratelimit"`
|
||||
BlockingMode string `json:"blocking_mode"`
|
||||
BlockingIPv4 string `json:"blocking_ipv4"`
|
||||
BlockingIPv6 string `json:"blocking_ipv6"`
|
||||
EDNSCSEnabled bool `json:"edns_cs_enabled"`
|
||||
DNSSECEnabled bool `json:"dnssec_enabled"`
|
||||
DisableIPv6 bool `json:"disable_ipv6"`
|
||||
UpstreamMode string `json:"upstream_mode"`
|
||||
CacheSize uint32 `json:"cache_size"`
|
||||
CacheMinTTL uint32 `json:"cache_ttl_min"`
|
||||
CacheMaxTTL uint32 `json:"cache_ttl_max"`
|
||||
ProtectionEnabled *bool `json:"protection_enabled"`
|
||||
RateLimit *uint32 `json:"ratelimit"`
|
||||
BlockingMode *string `json:"blocking_mode"`
|
||||
BlockingIPv4 *string `json:"blocking_ipv4"`
|
||||
BlockingIPv6 *string `json:"blocking_ipv6"`
|
||||
EDNSCSEnabled *bool `json:"edns_cs_enabled"`
|
||||
DNSSECEnabled *bool `json:"dnssec_enabled"`
|
||||
DisableIPv6 *bool `json:"disable_ipv6"`
|
||||
UpstreamMode *string `json:"upstream_mode"`
|
||||
CacheSize *uint32 `json:"cache_size"`
|
||||
CacheMinTTL *uint32 `json:"cache_ttl_min"`
|
||||
CacheMaxTTL *uint32 `json:"cache_ttl_max"`
|
||||
}
|
||||
|
||||
func (s *Server) getDNSConfig() dnsConfig {
|
||||
s.RLock()
|
||||
upstreams := stringArrayDup(s.conf.UpstreamDNS)
|
||||
upstreamFile := s.conf.UpstreamDNSFileName
|
||||
bootstraps := stringArrayDup(s.conf.BootstrapDNS)
|
||||
protectionEnabled := s.conf.ProtectionEnabled
|
||||
blockingMode := s.conf.BlockingMode
|
||||
BlockingIPv4 := s.conf.BlockingIPv4
|
||||
BlockingIPv6 := s.conf.BlockingIPv6
|
||||
Ratelimit := s.conf.Ratelimit
|
||||
EnableEDNSClientSubnet := s.conf.EnableEDNSClientSubnet
|
||||
EnableDNSSEC := s.conf.EnableDNSSEC
|
||||
AAAADisabled := s.conf.AAAADisabled
|
||||
CacheSize := s.conf.CacheSize
|
||||
CacheMinTTL := s.conf.CacheMinTTL
|
||||
CacheMaxTTL := s.conf.CacheMaxTTL
|
||||
var upstreamMode string
|
||||
if s.conf.FastestAddr {
|
||||
upstreamMode = "fastest_addr"
|
||||
} else if s.conf.AllServers {
|
||||
upstreamMode = "parallel"
|
||||
}
|
||||
s.RUnlock()
|
||||
return dnsConfig{
|
||||
Upstreams: &upstreams,
|
||||
UpstreamsFile: &upstreamFile,
|
||||
Bootstraps: &bootstraps,
|
||||
ProtectionEnabled: &protectionEnabled,
|
||||
BlockingMode: &blockingMode,
|
||||
BlockingIPv4: &BlockingIPv4,
|
||||
BlockingIPv6: &BlockingIPv6,
|
||||
RateLimit: &Ratelimit,
|
||||
EDNSCSEnabled: &EnableEDNSClientSubnet,
|
||||
DNSSECEnabled: &EnableDNSSEC,
|
||||
DisableIPv6: &AAAADisabled,
|
||||
CacheSize: &CacheSize,
|
||||
CacheMinTTL: &CacheMinTTL,
|
||||
CacheMaxTTL: &CacheMaxTTL,
|
||||
UpstreamMode: &upstreamMode,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) handleGetConfig(w http.ResponseWriter, r *http.Request) {
|
||||
resp := dnsConfigJSON{}
|
||||
s.RLock()
|
||||
resp.Upstreams = stringArrayDup(s.conf.UpstreamDNS)
|
||||
resp.UpstreamsFile = s.conf.UpstreamDNSFileName
|
||||
resp.Bootstraps = stringArrayDup(s.conf.BootstrapDNS)
|
||||
resp := s.getDNSConfig()
|
||||
|
||||
resp.ProtectionEnabled = s.conf.ProtectionEnabled
|
||||
resp.BlockingMode = s.conf.BlockingMode
|
||||
resp.BlockingIPv4 = s.conf.BlockingIPv4
|
||||
resp.BlockingIPv6 = s.conf.BlockingIPv6
|
||||
resp.RateLimit = s.conf.Ratelimit
|
||||
resp.EDNSCSEnabled = s.conf.EnableEDNSClientSubnet
|
||||
resp.DNSSECEnabled = s.conf.EnableDNSSEC
|
||||
resp.DisableIPv6 = s.conf.AAAADisabled
|
||||
resp.CacheSize = s.conf.CacheSize
|
||||
resp.CacheMinTTL = s.conf.CacheMinTTL
|
||||
resp.CacheMaxTTL = s.conf.CacheMaxTTL
|
||||
if s.conf.FastestAddr {
|
||||
resp.UpstreamMode = "fastest_addr"
|
||||
} else if s.conf.AllServers {
|
||||
resp.UpstreamMode = "parallel"
|
||||
}
|
||||
s.RUnlock()
|
||||
|
||||
js, err := json.Marshal(resp)
|
||||
if err != nil {
|
||||
httpError(r, w, http.StatusInternalServerError, "json.Marshal: %s", err)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
_, _ = w.Write(js)
|
||||
|
||||
enc := json.NewEncoder(w)
|
||||
if err := enc.Encode(resp); err != nil {
|
||||
httpError(r, w, http.StatusInternalServerError, "json.Encoder: %s", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func checkBlockingMode(req dnsConfigJSON) bool {
|
||||
bm := req.BlockingMode
|
||||
if !(bm == "default" || bm == "refused" || bm == "nxdomain" || bm == "null_ip" || bm == "custom_ip") {
|
||||
return false
|
||||
func (req *dnsConfig) checkBlockingMode() bool {
|
||||
if req.BlockingMode == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
bm := *req.BlockingMode
|
||||
if bm == "custom_ip" {
|
||||
ip := net.ParseIP(req.BlockingIPv4)
|
||||
if ip == nil || ip.To4() == nil {
|
||||
if req.BlockingIPv4 == nil || req.BlockingIPv6 == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
ip = net.ParseIP(req.BlockingIPv6)
|
||||
if ip == nil {
|
||||
ip4 := net.ParseIP(*req.BlockingIPv4)
|
||||
if ip4 == nil || ip4.To4() == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
ip6 := net.ParseIP(*req.BlockingIPv6)
|
||||
return ip6 != nil
|
||||
}
|
||||
|
||||
for _, valid := range []string{
|
||||
"default",
|
||||
"refused",
|
||||
"nxdomain",
|
||||
"null_ip",
|
||||
} {
|
||||
if bm == valid {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
return false
|
||||
}
|
||||
|
||||
// Validate bootstrap server address
|
||||
func checkBootstrap(addr string) error {
|
||||
if addr == "" { // additional check is required because NewResolver() allows empty address
|
||||
return fmt.Errorf("invalid bootstrap server address: empty")
|
||||
func (req *dnsConfig) checkUpstreamsMode() bool {
|
||||
if req.UpstreamMode == nil {
|
||||
return true
|
||||
}
|
||||
_, err := upstream.NewResolver(addr, 0)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid bootstrap server address: %w", err)
|
||||
|
||||
for _, valid := range []string{
|
||||
"",
|
||||
"fastest_addr",
|
||||
"parallel",
|
||||
} {
|
||||
if *req.UpstreamMode == valid {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (req *dnsConfig) checkBootstrap() (string, error) {
|
||||
if req.Bootstraps == nil {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
for _, boot := range *req.Bootstraps {
|
||||
if boot == "" {
|
||||
return boot, fmt.Errorf("invalid bootstrap server address: empty")
|
||||
}
|
||||
|
||||
if _, err := upstream.NewResolver(boot, 0); err != nil {
|
||||
return boot, fmt.Errorf("invalid bootstrap server address: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return "", nil
|
||||
}
|
||||
|
||||
func (req *dnsConfig) checkCacheTTL() bool {
|
||||
if req.CacheMinTTL == nil && req.CacheMaxTTL == nil {
|
||||
return true
|
||||
}
|
||||
var min, max uint32
|
||||
if req.CacheMinTTL != nil {
|
||||
min = *req.CacheMinTTL
|
||||
}
|
||||
if req.CacheMaxTTL == nil {
|
||||
max = *req.CacheMaxTTL
|
||||
}
|
||||
|
||||
return min <= max
|
||||
}
|
||||
|
||||
// nolint(gocyclo) - we need to check each JSON field separately
|
||||
func (s *Server) handleSetConfig(w http.ResponseWriter, r *http.Request) {
|
||||
req := dnsConfigJSON{}
|
||||
js, err := jsonutil.DecodeObject(&req, r.Body)
|
||||
if err != nil {
|
||||
httpError(r, w, http.StatusBadRequest, "json.Decode: %s", err)
|
||||
req := dnsConfig{}
|
||||
dec := json.NewDecoder(r.Body)
|
||||
if err := dec.Decode(&req); err != nil {
|
||||
httpError(r, w, http.StatusBadRequest, "json Encode: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
if js.Exists("upstream_dns") {
|
||||
err = ValidateUpstreams(req.Upstreams)
|
||||
if err != nil {
|
||||
if req.Upstreams != nil {
|
||||
if err := ValidateUpstreams(*req.Upstreams); err != nil {
|
||||
httpError(r, w, http.StatusBadRequest, "wrong upstreams specification: %s", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if js.Exists("bootstrap_dns") {
|
||||
for _, boot := range req.Bootstraps {
|
||||
if err := checkBootstrap(boot); err != nil {
|
||||
httpError(r, w, http.StatusBadRequest, "%s can not be used as bootstrap dns cause: %s", boot, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
if errBoot, err := req.checkBootstrap(); err != nil {
|
||||
httpError(r, w, http.StatusBadRequest, "%s can not be used as bootstrap dns cause: %s", errBoot, err)
|
||||
return
|
||||
}
|
||||
|
||||
if js.Exists("blocking_mode") && !checkBlockingMode(req) {
|
||||
if !req.checkBlockingMode() {
|
||||
httpError(r, w, http.StatusBadRequest, "blocking_mode: incorrect value")
|
||||
return
|
||||
}
|
||||
|
||||
if js.Exists("upstream_mode") &&
|
||||
!(req.UpstreamMode == "" || req.UpstreamMode == "fastest_addr" || req.UpstreamMode == "parallel") {
|
||||
if !req.checkUpstreamsMode() {
|
||||
httpError(r, w, http.StatusBadRequest, "upstream_mode: incorrect value")
|
||||
return
|
||||
}
|
||||
|
||||
if req.CacheMinTTL > req.CacheMaxTTL {
|
||||
if !req.checkCacheTTL() {
|
||||
httpError(r, w, http.StatusBadRequest, "cache_ttl_min must be less or equal than cache_ttl_max")
|
||||
return
|
||||
}
|
||||
|
||||
restart := false
|
||||
s.Lock()
|
||||
|
||||
if js.Exists("upstream_dns") {
|
||||
s.conf.UpstreamDNS = req.Upstreams
|
||||
restart = true
|
||||
}
|
||||
|
||||
if js.Exists("upstream_dns_file") {
|
||||
s.conf.UpstreamDNSFileName = req.UpstreamsFile
|
||||
restart = true
|
||||
}
|
||||
|
||||
if js.Exists("bootstrap_dns") {
|
||||
s.conf.BootstrapDNS = req.Bootstraps
|
||||
restart = true
|
||||
}
|
||||
|
||||
if js.Exists("protection_enabled") {
|
||||
s.conf.ProtectionEnabled = req.ProtectionEnabled
|
||||
}
|
||||
|
||||
if js.Exists("blocking_mode") {
|
||||
s.conf.BlockingMode = req.BlockingMode
|
||||
if req.BlockingMode == "custom_ip" {
|
||||
if js.Exists("blocking_ipv4") {
|
||||
s.conf.BlockingIPv4 = req.BlockingIPv4
|
||||
s.conf.BlockingIPAddrv4 = net.ParseIP(req.BlockingIPv4)
|
||||
}
|
||||
if js.Exists("blocking_ipv6") {
|
||||
s.conf.BlockingIPv6 = req.BlockingIPv6
|
||||
s.conf.BlockingIPAddrv6 = net.ParseIP(req.BlockingIPv6)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if js.Exists("ratelimit") {
|
||||
if s.conf.Ratelimit != req.RateLimit {
|
||||
restart = true
|
||||
}
|
||||
s.conf.Ratelimit = req.RateLimit
|
||||
}
|
||||
|
||||
if js.Exists("edns_cs_enabled") {
|
||||
s.conf.EnableEDNSClientSubnet = req.EDNSCSEnabled
|
||||
restart = true
|
||||
}
|
||||
|
||||
if js.Exists("dnssec_enabled") {
|
||||
s.conf.EnableDNSSEC = req.DNSSECEnabled
|
||||
}
|
||||
|
||||
if js.Exists("disable_ipv6") {
|
||||
s.conf.AAAADisabled = req.DisableIPv6
|
||||
}
|
||||
|
||||
if js.Exists("cache_size") {
|
||||
s.conf.CacheSize = req.CacheSize
|
||||
restart = true
|
||||
}
|
||||
|
||||
if js.Exists("cache_ttl_min") {
|
||||
s.conf.CacheMinTTL = req.CacheMinTTL
|
||||
restart = true
|
||||
}
|
||||
|
||||
if js.Exists("cache_ttl_max") {
|
||||
s.conf.CacheMaxTTL = req.CacheMaxTTL
|
||||
restart = true
|
||||
}
|
||||
|
||||
if js.Exists("upstream_mode") {
|
||||
s.conf.FastestAddr = false
|
||||
s.conf.AllServers = false
|
||||
switch req.UpstreamMode {
|
||||
case "":
|
||||
//
|
||||
|
||||
case "parallel":
|
||||
s.conf.AllServers = true
|
||||
|
||||
case "fastest_addr":
|
||||
s.conf.FastestAddr = true
|
||||
}
|
||||
}
|
||||
|
||||
s.Unlock()
|
||||
s.conf.ConfigModified()
|
||||
|
||||
if restart {
|
||||
err = s.Reconfigure(nil)
|
||||
if err != nil {
|
||||
if s.setConfig(req) {
|
||||
if err := s.Reconfigure(nil); err != nil {
|
||||
httpError(r, w, http.StatusInternalServerError, "%s", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) setConfig(dc dnsConfig) (restart bool) {
|
||||
s.Lock()
|
||||
|
||||
if dc.Upstreams != nil {
|
||||
s.conf.UpstreamDNS = *dc.Upstreams
|
||||
restart = true
|
||||
}
|
||||
|
||||
if dc.UpstreamsFile != nil {
|
||||
s.conf.UpstreamDNSFileName = *dc.UpstreamsFile
|
||||
restart = true
|
||||
}
|
||||
|
||||
if dc.Bootstraps != nil {
|
||||
s.conf.BootstrapDNS = *dc.Bootstraps
|
||||
restart = true
|
||||
}
|
||||
|
||||
if dc.ProtectionEnabled != nil {
|
||||
s.conf.ProtectionEnabled = *dc.ProtectionEnabled
|
||||
}
|
||||
|
||||
if dc.BlockingMode != nil {
|
||||
s.conf.BlockingMode = *dc.BlockingMode
|
||||
if *dc.BlockingMode == "custom_ip" {
|
||||
s.conf.BlockingIPv4 = *dc.BlockingIPv4
|
||||
s.conf.BlockingIPAddrv4 = net.ParseIP(*dc.BlockingIPv4)
|
||||
s.conf.BlockingIPv6 = *dc.BlockingIPv6
|
||||
s.conf.BlockingIPAddrv6 = net.ParseIP(*dc.BlockingIPv6)
|
||||
}
|
||||
}
|
||||
|
||||
if dc.RateLimit != nil {
|
||||
if s.conf.Ratelimit != *dc.RateLimit {
|
||||
restart = true
|
||||
}
|
||||
s.conf.Ratelimit = *dc.RateLimit
|
||||
}
|
||||
|
||||
if dc.EDNSCSEnabled != nil {
|
||||
s.conf.EnableEDNSClientSubnet = *dc.EDNSCSEnabled
|
||||
restart = true
|
||||
}
|
||||
|
||||
if dc.DNSSECEnabled != nil {
|
||||
s.conf.EnableDNSSEC = *dc.DNSSECEnabled
|
||||
}
|
||||
|
||||
if dc.DisableIPv6 != nil {
|
||||
s.conf.AAAADisabled = *dc.DisableIPv6
|
||||
}
|
||||
|
||||
if dc.CacheSize != nil {
|
||||
s.conf.CacheSize = *dc.CacheSize
|
||||
restart = true
|
||||
}
|
||||
|
||||
if dc.CacheMinTTL != nil {
|
||||
s.conf.CacheMinTTL = *dc.CacheMinTTL
|
||||
restart = true
|
||||
}
|
||||
|
||||
if dc.CacheMaxTTL != nil {
|
||||
s.conf.CacheMaxTTL = *dc.CacheMaxTTL
|
||||
restart = true
|
||||
}
|
||||
|
||||
if dc.UpstreamMode != nil {
|
||||
switch *dc.UpstreamMode {
|
||||
case "parallel":
|
||||
s.conf.AllServers = true
|
||||
s.conf.FastestAddr = false
|
||||
case "fastest_addr":
|
||||
s.conf.AllServers = false
|
||||
s.conf.FastestAddr = true
|
||||
default:
|
||||
s.conf.AllServers = false
|
||||
s.conf.FastestAddr = false
|
||||
}
|
||||
}
|
||||
s.Unlock()
|
||||
s.conf.ConfigModified()
|
||||
return restart
|
||||
}
|
||||
|
||||
type upstreamJSON struct {
|
||||
Upstreams []string `json:"upstream_dns"` // Upstreams
|
||||
BootstrapDNS []string `json:"bootstrap_dns"` // Bootstrap DNS
|
||||
|
|
|
@ -29,7 +29,7 @@ func TestDNSForwardHTTTP_handleGetConfig(t *testing.T) {
|
|||
conf: func() ServerConfig {
|
||||
return defaultConf
|
||||
},
|
||||
want: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}",
|
||||
want: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}\n",
|
||||
}, {
|
||||
name: "fastest_addr",
|
||||
conf: func() ServerConfig {
|
||||
|
@ -37,7 +37,7 @@ func TestDNSForwardHTTTP_handleGetConfig(t *testing.T) {
|
|||
conf.FastestAddr = true
|
||||
return conf
|
||||
},
|
||||
want: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"fastest_addr\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}",
|
||||
want: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"fastest_addr\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}\n",
|
||||
}, {
|
||||
name: "parallel",
|
||||
conf: func() ServerConfig {
|
||||
|
@ -45,7 +45,7 @@ func TestDNSForwardHTTTP_handleGetConfig(t *testing.T) {
|
|||
conf.AllServers = true
|
||||
return conf
|
||||
},
|
||||
want: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"parallel\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}",
|
||||
want: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"parallel\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}\n",
|
||||
}}
|
||||
|
||||
for _, tc := range testCases {
|
||||
|
@ -73,7 +73,7 @@ func TestDNSForwardHTTTP_handleSetConfig(t *testing.T) {
|
|||
|
||||
w := httptest.NewRecorder()
|
||||
|
||||
const defaultConfJSON = "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}"
|
||||
const defaultConfJSON = "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}\n"
|
||||
testCases := []struct {
|
||||
name string
|
||||
req string
|
||||
|
@ -83,52 +83,52 @@ func TestDNSForwardHTTTP_handleSetConfig(t *testing.T) {
|
|||
name: "upstream_dns",
|
||||
req: "{\"upstream_dns\":[\"8.8.8.8:77\",\"8.8.4.4:77\"]}",
|
||||
wantSet: "",
|
||||
wantGet: "{\"upstream_dns\":[\"8.8.8.8:77\",\"8.8.4.4:77\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}",
|
||||
wantGet: "{\"upstream_dns\":[\"8.8.8.8:77\",\"8.8.4.4:77\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}\n",
|
||||
}, {
|
||||
name: "bootstraps",
|
||||
req: "{\"bootstrap_dns\":[\"9.9.9.10\"]}",
|
||||
wantSet: "",
|
||||
wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}",
|
||||
wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}\n",
|
||||
}, {
|
||||
name: "blocking_mode_good",
|
||||
req: "{\"blocking_mode\":\"refused\"}",
|
||||
wantSet: "",
|
||||
wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"refused\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}",
|
||||
wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"refused\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}\n",
|
||||
}, {
|
||||
name: "blocking_mode_bad",
|
||||
req: "{\"blocking_mode\":\"custom_ip\"}",
|
||||
wantSet: "blocking_mode: incorrect value\n",
|
||||
wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}",
|
||||
wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}\n",
|
||||
}, {
|
||||
name: "ratelimit",
|
||||
req: "{\"ratelimit\":6}",
|
||||
wantSet: "",
|
||||
wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":6,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}",
|
||||
wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":6,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}\n",
|
||||
}, {
|
||||
name: "edns_cs_enabled",
|
||||
req: "{\"edns_cs_enabled\":true}",
|
||||
wantSet: "",
|
||||
wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":true,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}",
|
||||
wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":true,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}\n",
|
||||
}, {
|
||||
name: "dnssec_enabled",
|
||||
req: "{\"dnssec_enabled\":true}",
|
||||
wantSet: "",
|
||||
wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":true,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}",
|
||||
wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":true,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}\n",
|
||||
}, {
|
||||
name: "cache_size",
|
||||
req: "{\"cache_size\":1024}",
|
||||
wantSet: "",
|
||||
wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":1024,\"cache_ttl_min\":0,\"cache_ttl_max\":0}",
|
||||
wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"\",\"cache_size\":1024,\"cache_ttl_min\":0,\"cache_ttl_max\":0}\n",
|
||||
}, {
|
||||
name: "upstream_mode_parallel",
|
||||
req: "{\"upstream_mode\":\"parallel\"}",
|
||||
wantSet: "",
|
||||
wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"parallel\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}",
|
||||
wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"parallel\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}\n",
|
||||
}, {
|
||||
name: "upstream_mode_fastest_addr",
|
||||
req: "{\"upstream_mode\":\"fastest_addr\"}",
|
||||
wantSet: "",
|
||||
wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"fastest_addr\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}",
|
||||
wantGet: "{\"upstream_dns\":[\"8.8.8.8:53\",\"8.8.4.4:53\"],\"upstream_dns_file\":\"\",\"bootstrap_dns\":[\"9.9.9.10\",\"149.112.112.10\",\"2620:fe::10\",\"2620:fe::fe:10\"],\"protection_enabled\":true,\"ratelimit\":0,\"blocking_mode\":\"\",\"blocking_ipv4\":\"\",\"blocking_ipv6\":\"\",\"edns_cs_enabled\":false,\"dnssec_enabled\":false,\"disable_ipv6\":false,\"upstream_mode\":\"fastest_addr\",\"cache_size\":0,\"cache_ttl_min\":0,\"cache_ttl_max\":0}\n",
|
||||
}, {
|
||||
name: "upstream_dns_bad",
|
||||
req: "{\"upstream_dns\":[\"\"]}",
|
||||
|
|
|
@@ -1,12 +1,14 @@
package home

import (
	"crypto/rand"
	"crypto/sha256"
	"encoding/binary"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"math/rand"
	"math"
	"math/big"
	"net/http"
	"strings"
	"sync"

@@ -76,7 +78,6 @@ func InitAuth(dbFilename string, users []User, sessionTTL uint32) *Auth {
	a := Auth{}
	a.sessionTTL = sessionTTL
	a.sessions = make(map[string]*session)
	rand.Seed(time.Now().UTC().Unix())
	var err error
	a.db, err = bbolt.Open(dbFilename, 0o644, nil)
	if err != nil {

@@ -275,23 +276,28 @@ type loginJSON struct {
	Password string `json:"password"`
}

func getSession(u *User) []byte {
	// the developers don't currently believe that using a
	// non-cryptographic RNG for the session hash salt is
	// insecure
	salt := rand.Uint32() //nolint:gosec
	d := []byte(fmt.Sprintf("%d%s%s", salt, u.Name, u.PasswordHash))
	hash := sha256.Sum256(d)
	return hash[:]
}

func (a *Auth) httpCookie(req loginJSON) string {
	u := a.UserFind(req.Name, req.Password)
	if len(u.Name) == 0 {
		return ""
func getSession(u *User) ([]byte, error) {
	maxSalt := big.NewInt(math.MaxUint32)
	salt, err := rand.Int(rand.Reader, maxSalt)
	if err != nil {
		return nil, err
	}

	sess := getSession(&u)
	d := []byte(fmt.Sprintf("%s%s%s", salt, u.Name, u.PasswordHash))
	hash := sha256.Sum256(d)
	return hash[:], nil
}

func (a *Auth) httpCookie(req loginJSON) (string, error) {
	u := a.UserFind(req.Name, req.Password)
	if len(u.Name) == 0 {
		return "", nil
	}

	sess, err := getSession(&u)
	if err != nil {
		return "", err
	}

	now := time.Now().UTC()
	expire := now.Add(cookieTTL * time.Hour)

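The rewritten getSession above draws its salt from crypto/rand rather than the seeded math/rand generator, and the error from the random source is now propagated to the caller. A standalone sketch of the same pattern outside the repository (sessionToken is an illustrative name):

    package main

    import (
        "crypto/rand"
        "crypto/sha256"
        "fmt"
        "math"
        "math/big"
    )

    // sessionToken mixes a crypto/rand salt with user data and hashes the result,
    // mirroring the new getSession.
    func sessionToken(name, passwordHash string) ([]byte, error) {
        salt, err := rand.Int(rand.Reader, big.NewInt(math.MaxUint32))
        if err != nil {
            return nil, err
        }
        sum := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", salt, name, passwordHash)))
        return sum[:], nil
    }

    func main() {
        tok, err := sessionToken("name", "hash")
        fmt.Println(len(tok), err) // 32 <nil>
    }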
@@ -305,7 +311,7 @@ func (a *Auth) httpCookie(req loginJSON) string {
	a.addSession(sess, &s)

	return fmt.Sprintf("%s=%s; Path=/; HttpOnly; Expires=%s",
		sessionCookieName, hex.EncodeToString(sess), expstr)
		sessionCookieName, hex.EncodeToString(sess), expstr), nil
}

func handleLogin(w http.ResponseWriter, r *http.Request) {

@@ -316,7 +322,11 @@ func handleLogin(w http.ResponseWriter, r *http.Request) {
		return
	}

	cookie := Context.auth.httpCookie(req)
	cookie, err := Context.auth.httpCookie(req)
	if err != nil {
		httpError(w, http.StatusBadRequest, "crypto rand reader: %s", err)
		return
	}
	if len(cookie) == 0 {
		log.Info("Auth: invalid user name or password: name=%q", req.Name)
		time.Sleep(1 * time.Second)

@ -369,7 +379,54 @@ func parseCookie(cookie string) string {
|
|||
return ""
|
||||
}
|
||||
|
||||
// nolint(gocyclo)
|
||||
// optionalAuthThird return true if user should authenticate first.
|
||||
func optionalAuthThird(w http.ResponseWriter, r *http.Request) (authFirst bool) {
|
||||
authFirst = false
|
||||
|
||||
// redirect to login page if not authenticated
|
||||
ok := false
|
||||
cookie, err := r.Cookie(sessionCookieName)
|
||||
|
||||
if glProcessCookie(r) {
|
||||
log.Debug("Auth: authentification was handled by GL-Inet submodule")
|
||||
ok = true
|
||||
|
||||
} else if err == nil {
|
||||
r := Context.auth.CheckSession(cookie.Value)
|
||||
if r == 0 {
|
||||
ok = true
|
||||
} else if r < 0 {
|
||||
log.Debug("Auth: invalid cookie value: %s", cookie)
|
||||
}
|
||||
} else {
|
||||
// there's no Cookie, check Basic authentication
|
||||
user, pass, ok2 := r.BasicAuth()
|
||||
if ok2 {
|
||||
u := Context.auth.UserFind(user, pass)
|
||||
if len(u.Name) != 0 {
|
||||
ok = true
|
||||
} else {
|
||||
log.Info("Auth: invalid Basic Authorization value")
|
||||
}
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
if r.URL.Path == "/" || r.URL.Path == "/index.html" {
|
||||
if glProcessRedirect(w, r) {
|
||||
log.Debug("Auth: redirected to login page by GL-Inet submodule")
|
||||
} else {
|
||||
w.Header().Set("Location", "/login.html")
|
||||
w.WriteHeader(http.StatusFound)
|
||||
}
|
||||
} else {
|
||||
w.WriteHeader(http.StatusForbidden)
|
||||
_, _ = w.Write([]byte("Forbidden"))
|
||||
}
|
||||
authFirst = true
|
||||
}
|
||||
return authFirst
|
||||
}
|
||||
|
||||
func optionalAuth(handler func(http.ResponseWriter, *http.Request)) func(http.ResponseWriter, *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.Path == "/login.html" {
|
||||
|
@ -392,45 +449,7 @@ func optionalAuth(handler func(http.ResponseWriter, *http.Request)) func(http.Re
|
|||
// process as usual
|
||||
// no additional auth requirements
|
||||
} else if Context.auth != nil && Context.auth.AuthRequired() {
|
||||
// redirect to login page if not authenticated
|
||||
ok := false
|
||||
cookie, err := r.Cookie(sessionCookieName)
|
||||
|
||||
if glProcessCookie(r) {
|
||||
log.Debug("Auth: authentification was handled by GL-Inet submodule")
|
||||
ok = true
|
||||
|
||||
} else if err == nil {
|
||||
r := Context.auth.CheckSession(cookie.Value)
|
||||
if r == 0 {
|
||||
ok = true
|
||||
} else if r < 0 {
|
||||
log.Debug("Auth: invalid cookie value: %s", cookie)
|
||||
}
|
||||
} else {
|
||||
// there's no Cookie, check Basic authentication
|
||||
user, pass, ok2 := r.BasicAuth()
|
||||
if ok2 {
|
||||
u := Context.auth.UserFind(user, pass)
|
||||
if len(u.Name) != 0 {
|
||||
ok = true
|
||||
} else {
|
||||
log.Info("Auth: invalid Basic Authorization value")
|
||||
}
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
if r.URL.Path == "/" || r.URL.Path == "/index.html" {
|
||||
if glProcessRedirect(w, r) {
|
||||
log.Debug("Auth: redirected to login page by GL-Inet submodule")
|
||||
} else {
|
||||
w.Header().Set("Location", "/login.html")
|
||||
w.WriteHeader(http.StatusFound)
|
||||
}
|
||||
} else {
|
||||
w.WriteHeader(http.StatusForbidden)
|
||||
_, _ = w.Write([]byte("Forbidden"))
|
||||
}
|
||||
if optionalAuthThird(w, r) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
|
|
@ -41,7 +41,8 @@ func TestAuth(t *testing.T) {
|
|||
assert.True(t, a.CheckSession("notfound") == -1)
|
||||
a.RemoveSession("notfound")
|
||||
|
||||
sess := getSession(&users[0])
|
||||
sess, err := getSession(&users[0])
|
||||
assert.Nil(t, err)
|
||||
sessStr := hex.EncodeToString(sess)
|
||||
|
||||
now := time.Now().UTC().Unix()
|
||||
|
@ -136,7 +137,8 @@ func TestAuthHTTP(t *testing.T) {
|
|||
assert.True(t, handlerCalled)
|
||||
|
||||
// perform login
|
||||
cookie := Context.auth.httpCookie(loginJSON{Name: "name", Password: "password"})
|
||||
cookie, err := Context.auth.httpCookie(loginJSON{Name: "name", Password: "password"})
|
||||
assert.Nil(t, err)
|
||||
assert.True(t, cookie != "")
|
||||
|
||||
// get /
|
||||
|
|
|
@ -88,60 +88,6 @@ func isRunning() bool {
|
|||
return Context.dnsServer != nil && Context.dnsServer.IsRunning()
|
||||
}
|
||||
|
||||
// nolint (gocyclo)
|
||||
// Return TRUE if IP is within public Internet IP range
|
||||
func isPublicIP(ip net.IP) bool {
|
||||
ip4 := ip.To4()
|
||||
if ip4 != nil {
|
||||
switch ip4[0] {
|
||||
case 0:
|
||||
return false // software
|
||||
case 10:
|
||||
return false // private network
|
||||
case 127:
|
||||
return false // loopback
|
||||
case 169:
|
||||
if ip4[1] == 254 {
|
||||
return false // link-local
|
||||
}
|
||||
case 172:
|
||||
if ip4[1] >= 16 && ip4[1] <= 31 {
|
||||
return false // private network
|
||||
}
|
||||
case 192:
|
||||
if (ip4[1] == 0 && ip4[2] == 0) || // private network
|
||||
(ip4[1] == 0 && ip4[2] == 2) || // documentation
|
||||
(ip4[1] == 88 && ip4[2] == 99) || // reserved
|
||||
(ip4[1] == 168) { // private network
|
||||
return false
|
||||
}
|
||||
case 198:
|
||||
if (ip4[1] == 18 || ip4[2] == 19) || // private network
|
||||
(ip4[1] == 51 || ip4[2] == 100) { // documentation
|
||||
return false
|
||||
}
|
||||
case 203:
|
||||
if ip4[1] == 0 && ip4[2] == 113 { // documentation
|
||||
return false
|
||||
}
|
||||
case 224:
|
||||
if ip4[1] == 0 && ip4[2] == 0 { // multicast
|
||||
return false
|
||||
}
|
||||
case 255:
|
||||
if ip4[1] == 255 && ip4[2] == 255 && ip4[3] == 255 { // subnet
|
||||
return false
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if ip.IsLoopback() || ip.IsLinkLocalMulticast() || ip.IsLinkLocalUnicast() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func onDNSRequest(d *proxy.DNSContext) {
|
||||
ip := dnsforward.GetIPString(d.Addr)
|
||||
if ip == "" {
|
||||
|
@ -153,7 +99,7 @@ func onDNSRequest(d *proxy.DNSContext) {
|
|||
if !ipAddr.IsLoopback() {
|
||||
Context.rdns.Begin(ip)
|
||||
}
|
||||
if isPublicIP(ipAddr) {
|
||||
if !Context.ipDetector.detectSpecialNetwork(ipAddr) {
|
||||
Context.whois.Begin(ip)
|
||||
}
|
||||
}
|
||||
|
@ -327,7 +273,7 @@ func startDNSServer() error {
|
|||
if !ipAddr.IsLoopback() {
|
||||
Context.rdns.Begin(ip)
|
||||
}
|
||||
if isPublicIP(ipAddr) {
|
||||
if !Context.ipDetector.detectSpecialNetwork(ipAddr) {
|
||||
Context.whois.Begin(ip)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -6,6 +6,7 @@ import (
|
|||
"hash/crc32"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
|
@ -497,46 +498,7 @@ func (f *Filtering) update(filter *filter) (bool, error) {
|
|||
return b, err
|
||||
}
|
||||
|
||||
// nolint(gocyclo)
|
||||
func (f *Filtering) updateIntl(filter *filter) (bool, error) {
|
||||
log.Tracef("Downloading update for filter %d from %s", filter.ID, filter.URL)
|
||||
|
||||
tmpFile, err := ioutil.TempFile(filepath.Join(Context.getDataDir(), filterDir), "")
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer func() {
|
||||
if tmpFile != nil {
|
||||
_ = tmpFile.Close()
|
||||
_ = os.Remove(tmpFile.Name())
|
||||
}
|
||||
}()
|
||||
|
||||
var reader io.Reader
|
||||
if filepath.IsAbs(filter.URL) {
|
||||
f, err := os.Open(filter.URL)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("open file: %w", err)
|
||||
}
|
||||
defer f.Close()
|
||||
reader = f
|
||||
} else {
|
||||
resp, err := Context.client.Get(filter.URL)
|
||||
if resp != nil && resp.Body != nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
if err != nil {
|
||||
log.Printf("Couldn't request filter from URL %s, skipping: %s", filter.URL, err)
|
||||
return false, err
|
||||
}
|
||||
|
||||
if resp.StatusCode != 200 {
|
||||
log.Printf("Got status code %d from URL %s, skipping", resp.StatusCode, filter.URL)
|
||||
return false, fmt.Errorf("got status code != 200: %d", resp.StatusCode)
|
||||
}
|
||||
reader = resp.Body
|
||||
}
|
||||
|
||||
func (f *Filtering) read(reader io.Reader, tmpFile *os.File, filter *filter) (int, error) {
|
||||
htmlTest := true
|
||||
firstChunk := make([]byte, 4*1024)
|
||||
firstChunkLen := 0
|
||||
|
@ -556,12 +518,12 @@ func (f *Filtering) updateIntl(filter *filter) (bool, error) {
|
|||
|
||||
if firstChunkLen == len(firstChunk) || err == io.EOF {
|
||||
if !isPrintableText(firstChunk, firstChunkLen) {
|
||||
return false, fmt.Errorf("data contains non-printable characters")
|
||||
return total, fmt.Errorf("data contains non-printable characters")
|
||||
}
|
||||
|
||||
s := strings.ToLower(string(firstChunk))
|
||||
if strings.Contains(s, "<html") || strings.Contains(s, "<!doctype") {
|
||||
return false, fmt.Errorf("data is HTML, not plain text")
|
||||
return total, fmt.Errorf("data is HTML, not plain text")
|
||||
}
|
||||
|
||||
htmlTest = false
|
||||
|
@ -571,17 +533,70 @@ func (f *Filtering) updateIntl(filter *filter) (bool, error) {
|
|||
|
||||
_, err2 := tmpFile.Write(buf[:n])
|
||||
if err2 != nil {
|
||||
return false, err2
|
||||
return total, err2
|
||||
}
|
||||
|
||||
if err == io.EOF {
|
||||
break
|
||||
return total, nil
|
||||
}
|
||||
if err != nil {
|
||||
log.Printf("Couldn't fetch filter contents from URL %s, skipping: %s", filter.URL, err)
|
||||
return false, err
|
||||
return total, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// updateIntl returns true if filter update performed successfully.
|
||||
func (f *Filtering) updateIntl(filter *filter) (updated bool, err error) {
|
||||
updated = false
|
||||
log.Tracef("Downloading update for filter %d from %s", filter.ID, filter.URL)
|
||||
|
||||
tmpFile, err := ioutil.TempFile(filepath.Join(Context.getDataDir(), filterDir), "")
|
||||
if err != nil {
|
||||
return updated, err
|
||||
}
|
||||
defer func() {
|
||||
if tmpFile != nil {
|
||||
if err := tmpFile.Close(); err != nil {
|
||||
log.Printf("Couldn't close temporary file: %s", err)
|
||||
}
|
||||
tmpFileName := tmpFile.Name()
|
||||
if err := os.Remove(tmpFileName); err != nil {
|
||||
log.Printf("Couldn't delete temporary file %s: %s", tmpFileName, err)
|
||||
}
|
||||
|
||||
}
|
||||
}()
|
||||
|
||||
var reader io.Reader
|
||||
if filepath.IsAbs(filter.URL) {
|
||||
f, err := os.Open(filter.URL)
|
||||
if err != nil {
|
||||
return updated, fmt.Errorf("open file: %w", err)
|
||||
}
|
||||
defer f.Close()
|
||||
reader = f
|
||||
} else {
|
||||
resp, err := Context.client.Get(filter.URL)
|
||||
if resp != nil && resp.Body != nil {
|
||||
defer resp.Body.Close()
|
||||
}
|
||||
if err != nil {
|
||||
log.Printf("Couldn't request filter from URL %s, skipping: %s", filter.URL, err)
|
||||
return updated, err
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
log.Printf("Got status code %d from URL %s, skipping", resp.StatusCode, filter.URL)
|
||||
return updated, fmt.Errorf("got status code != 200: %d", resp.StatusCode)
|
||||
}
|
||||
reader = resp.Body
|
||||
}
|
||||
|
||||
total, err := f.read(reader, tmpFile, filter)
|
||||
if err != nil {
|
||||
return updated, err
|
||||
}
|
||||
|
||||
// Extract filter name and count number of rules
|
||||
_, _ = tmpFile.Seek(0, io.SeekStart)
|
||||
|
@ -589,7 +604,7 @@ func (f *Filtering) updateIntl(filter *filter) (bool, error) {
|
|||
// Check if the filter has been really changed
|
||||
if filter.checksum == checksum {
|
||||
log.Tracef("Filter #%d at URL %s hasn't changed, not updating it", filter.ID, filter.URL)
|
||||
return false, nil
|
||||
return updated, nil
|
||||
}
|
||||
|
||||
log.Printf("Filter %d has been updated: %d bytes, %d rules",
|
||||
|
@ -606,11 +621,12 @@ func (f *Filtering) updateIntl(filter *filter) (bool, error) {
|
|||
_ = tmpFile.Close()
|
||||
err = os.Rename(tmpFile.Name(), filterFilePath)
|
||||
if err != nil {
|
||||
return false, err
|
||||
return updated, err
|
||||
}
|
||||
tmpFile = nil
|
||||
updated = true
|
||||
|
||||
return true, nil
|
||||
return updated, nil
|
||||
}
|
||||
|
||||
// loads filter contents from the file in dataDir
|
||||
|
|
|
@ -65,6 +65,8 @@ type homeContext struct {
|
|||
autoHosts util.AutoHosts // IP-hostname pairs taken from system configuration (e.g. /etc/hosts) files
|
||||
updater *update.Updater
|
||||
|
||||
ipDetector *ipDetector
|
||||
|
||||
// Runtime properties
|
||||
// --
|
||||
|
||||
|

@@ -140,28 +142,7 @@ func version() string {

	return fmt.Sprintf(msg, versionString, updateChannel, runtime.GOOS, runtime.GOARCH)
}

// run initializes configuration and runs the AdGuard Home
// run is a blocking method!
// nolint
func run(args options) {
	// configure config filename
	initConfigFilename(args)

	// configure working dir and config path
	initWorkingDir(args)

	// configure log level and output
	configureLogger(args)

	// Go memory hacks
	memoryUsage(args)

	// print the first message after logger is configured
	log.Println(version())
	log.Debug("Current working directory is %s", Context.workDir)
	if args.runningAsService {
		log.Info("AdGuard Home is running as a service")
	}
func setupContext(args options) {
	Context.runningAsService = args.runningAsService
	Context.disableUpdate = args.disableUpdate

@@ -179,7 +160,8 @@ func run(args options) {

		DialContext: customDialContext,
		Proxy:       getHTTPProxy,
		TLSClientConfig: &tls.Config{
			RootCAs: Context.tlsRoots,
			RootCAs:    Context.tlsRoots,
			MinVersion: tls.VersionTLS12,
		},
	}
	Context.client = &http.Client{

@@ -205,12 +187,9 @@ func run(args options) {

			os.Exit(0)
		}
	}
}

	// 'clients' module uses 'dnsfilter' module's static data (dnsfilter.BlockedSvcKnown()),
	// so we have to initialize dnsfilter's static data first,
	// but also avoid relying on automatic Go init() function
	dnsfilter.InitModule()

func setupConfig(args options) {
	config.DHCP.WorkDir = Context.workDir
	config.DHCP.HTTPRegister = httpRegister
	config.DHCP.ConfigModified = onConfigModified

@@ -251,6 +230,37 @@ func run(args options) {

	if len(args.pidFile) != 0 && writePIDFile(args.pidFile) {
		Context.pidFileName = args.pidFile
	}
}

// run performs configuring and starts AdGuard Home.
func run(args options) {
	// configure config filename
	initConfigFilename(args)

	// configure working dir and config path
	initWorkingDir(args)

	// configure log level and output
	configureLogger(args)

	// Go memory hacks
	memoryUsage(args)

	// print the first message after logger is configured
	log.Println(version())
	log.Debug("Current working directory is %s", Context.workDir)
	if args.runningAsService {
		log.Info("AdGuard Home is running as a service")
	}

	setupContext(args)

	// clients package uses dnsfilter package's static data (dnsfilter.BlockedSvcKnown()),
	// so we have to initialize dnsfilter's static data first,
	// but also avoid relying on automatic Go init() function
	dnsfilter.InitModule()

	setupConfig(args)

	if !Context.firstRun {
		// Save the updated config

@@ -322,6 +332,11 @@ func run(args options) {

		}
	}

	Context.ipDetector, err = newIPDetector()
	if err != nil {
		log.Fatal(err)
	}

	Context.web.Start()

	// wait indefinitely for other go-routines to complete their job

internal/home/ipdetector.go (new file, 72 lines)

@@ -0,0 +1,72 @@
package home

import "net"

// ipDetector describes IP address properties.
type ipDetector struct {
	nets []*net.IPNet
}

// newIPDetector returns a new IP detector.
func newIPDetector() (ipd *ipDetector, err error) {
	specialNetworks := []string{
		"0.0.0.0/8",
		"10.0.0.0/8",
		"100.64.0.0/10",
		"127.0.0.0/8",
		"169.254.0.0/16",
		"172.16.0.0/12",
		"192.0.0.0/24",
		"192.0.0.0/29",
		"192.0.2.0/24",
		"192.88.99.0/24",
		"192.168.0.0/16",
		"198.18.0.0/15",
		"198.51.100.0/24",
		"203.0.113.0/24",
		"240.0.0.0/4",
		"255.255.255.255/32",
		"::1/128",
		"::/128",
		"64:ff9b::/96",
		// Since this network is used for mapping IPv4 addresses, we
		// don't include it.
		// "::ffff:0:0/96",
		"100::/64",
		"2001::/23",
		"2001::/32",
		"2001:2::/48",
		"2001:db8::/32",
		"2001:10::/28",
		"2002::/16",
		"fc00::/7",
		"fe80::/10",
	}

	ipd = &ipDetector{
		nets: make([]*net.IPNet, len(specialNetworks)),
	}
	for i, ipnetStr := range specialNetworks {
		_, ipnet, err := net.ParseCIDR(ipnetStr)
		if err != nil {
			return nil, err
		}

		ipd.nets[i] = ipnet
	}

	return ipd, nil
}

// detectSpecialNetwork returns true if IP address is contained by any of
// special-purpose IP address registries according to RFC-6890
// (https://tools.ietf.org/html/rfc6890).
func (ipd *ipDetector) detectSpecialNetwork(ip net.IP) bool {
	for _, ipnet := range ipd.nets {
		if ipnet.Contains(ip) {
			return true
		}
	}

	return false
}
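
For orientation, a short usage sketch of the new detector (illustrative only, not part of the change; it assumes the detector is built once at startup, as run() does above):

	ipd, err := newIPDetector()
	if err != nil {
		log.Fatal(err)
	}

	// 10.1.2.3 falls into 10.0.0.0/8 (private-use); 8.8.8.8 matches no
	// special-purpose registry.
	fmt.Println(ipd.detectSpecialNetwork(net.ParseIP("10.1.2.3"))) // true
	fmt.Println(ipd.detectSpecialNetwork(net.ParseIP("8.8.8.8")))  // false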

internal/home/ipdetector_test.go (new file, 146 lines)

@@ -0,0 +1,146 @@
package home

import (
	"net"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestIPDetector_detectSpecialNetwork(t *testing.T) {
	var ipd *ipDetector

	t.Run("newIPDetector", func(t *testing.T) {
		var err error
		ipd, err = newIPDetector()
		assert.Nil(t, err)
	})

	testCases := []struct {
		name string
		ip   net.IP
		want bool
	}{{
		name: "not_specific",
		ip:   net.ParseIP("8.8.8.8"),
		want: false,
	}, {
		name: "this_host_on_this_network",
		ip:   net.ParseIP("0.0.0.0"),
		want: true,
	}, {
		name: "private-use",
		ip:   net.ParseIP("10.0.0.0"),
		want: true,
	}, {
		name: "shared_address_space",
		ip:   net.ParseIP("100.64.0.0"),
		want: true,
	}, {
		name: "loopback",
		ip:   net.ParseIP("127.0.0.0"),
		want: true,
	}, {
		name: "link_local",
		ip:   net.ParseIP("169.254.0.0"),
		want: true,
	}, {
		name: "private-use",
		ip:   net.ParseIP("172.16.0.0"),
		want: true,
	}, {
		name: "ietf_protocol_assignments",
		ip:   net.ParseIP("192.0.0.0"),
		want: true,
	}, {
		name: "ds-lite",
		ip:   net.ParseIP("192.0.0.0"),
		want: true,
	}, {
		name: "documentation_(test-net-1)",
		ip:   net.ParseIP("192.0.2.0"),
		want: true,
	}, {
		name: "6to4_relay_anycast",
		ip:   net.ParseIP("192.88.99.0"),
		want: true,
	}, {
		name: "private-use",
		ip:   net.ParseIP("192.168.0.0"),
		want: true,
	}, {
		name: "benchmarking",
		ip:   net.ParseIP("198.18.0.0"),
		want: true,
	}, {
		name: "documentation_(test-net-2)",
		ip:   net.ParseIP("198.51.100.0"),
		want: true,
	}, {
		name: "documentation_(test-net-3)",
		ip:   net.ParseIP("203.0.113.0"),
		want: true,
	}, {
		name: "reserved",
		ip:   net.ParseIP("240.0.0.0"),
		want: true,
	}, {
		name: "limited_broadcast",
		ip:   net.ParseIP("255.255.255.255"),
		want: true,
	}, {
		name: "loopback_address",
		ip:   net.ParseIP("::1"),
		want: true,
	}, {
		name: "unspecified_address",
		ip:   net.ParseIP("::"),
		want: true,
	}, {
		name: "ipv4-ipv6_translation",
		ip:   net.ParseIP("64:ff9b::"),
		want: true,
	}, {
		name: "discard-only_address_block",
		ip:   net.ParseIP("100::"),
		want: true,
	}, {
		name: "ietf_protocol_assignments",
		ip:   net.ParseIP("2001::"),
		want: true,
	}, {
		name: "teredo",
		ip:   net.ParseIP("2001::"),
		want: true,
	}, {
		name: "benchmarking",
		ip:   net.ParseIP("2001:2::"),
		want: true,
	}, {
		name: "documentation",
		ip:   net.ParseIP("2001:db8::"),
		want: true,
	}, {
		name: "orchid",
		ip:   net.ParseIP("2001:10::"),
		want: true,
	}, {
		name: "6to4",
		ip:   net.ParseIP("2002::"),
		want: true,
	}, {
		name: "unique-local",
		ip:   net.ParseIP("fc00::"),
		want: true,
	}, {
		name: "linked-scoped_unicast",
		ip:   net.ParseIP("fe80::"),
		want: true,
	}}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			assert.Equal(t, tc.want, ipd.detectSpecialNetwork(tc.ip))
		})
	}
}

@@ -2,6 +2,7 @@ package querylog

import (
	"encoding/base64"
	"encoding/json"
	"strconv"
	"strings"
	"time"

@@ -11,86 +12,206 @@ import (

	"github.com/miekg/dns"
)

// decodeLogEntry - decodes query log entry from a line
// nolint (gocyclo)
var logEntryHandlers = map[string](func(t json.Token, ent *logEntry) error){
	"IP": func(t json.Token, ent *logEntry) error {
		v, ok := t.(string)
		if !ok {
			return nil
		}
		if len(ent.IP) == 0 {
			ent.IP = v
		}
		return nil
	},
	"T": func(t json.Token, ent *logEntry) error {
		v, ok := t.(string)
		if !ok {
			return nil
		}
		var err error
		ent.Time, err = time.Parse(time.RFC3339, v)
		return err
	},
	"QH": func(t json.Token, ent *logEntry) error {
		v, ok := t.(string)
		if !ok {
			return nil
		}
		ent.QHost = v
		return nil
	},
	"QT": func(t json.Token, ent *logEntry) error {
		v, ok := t.(string)
		if !ok {
			return nil
		}
		ent.QType = v
		return nil
	},
	"QC": func(t json.Token, ent *logEntry) error {
		v, ok := t.(string)
		if !ok {
			return nil
		}
		ent.QClass = v
		return nil
	},
	"CP": func(t json.Token, ent *logEntry) error {
		v, ok := t.(string)
		if !ok {
			return nil
		}
		var err error
		ent.ClientProto, err = NewClientProto(v)
		return err
	},
	"Answer": func(t json.Token, ent *logEntry) error {
		v, ok := t.(string)
		if !ok {
			return nil
		}
		var err error
		ent.Answer, err = base64.StdEncoding.DecodeString(v)
		return err
	},
	"OrigAnswer": func(t json.Token, ent *logEntry) error {
		v, ok := t.(string)
		if !ok {
			return nil
		}
		var err error
		ent.OrigAnswer, err = base64.StdEncoding.DecodeString(v)
		return err
	},
	"IsFiltered": func(t json.Token, ent *logEntry) error {
		v, ok := t.(string)
		if !ok {
			return nil
		}
		b, err := strconv.ParseBool(v)
		if err != nil {
			return err
		}
		ent.Result.IsFiltered = b
		return nil
	},
	"Rule": func(t json.Token, ent *logEntry) error {
		v, ok := t.(string)
		if !ok {
			return nil
		}
		ent.Result.Rule = v
		return nil
	},
	"FilterID": func(t json.Token, ent *logEntry) error {
		v, ok := t.(string)
		if !ok {
			return nil
		}
		i, err := strconv.Atoi(v)
		if err != nil {
			return err
		}
		ent.Result.FilterID = int64(i)
		return nil
	},
	"Reason": func(t json.Token, ent *logEntry) error {
		v, ok := t.(string)
		if !ok {
			return nil
		}
		i, err := strconv.Atoi(v)
		if err != nil {
			return err
		}
		ent.Result.Reason = dnsfilter.Reason(i)
		return nil
	},
	"ServiceName": func(t json.Token, ent *logEntry) error {
		v, ok := t.(string)
		if !ok {
			return nil
		}
		ent.Result.ServiceName = v
		return nil
	},
	"Upstream": func(t json.Token, ent *logEntry) error {
		v, ok := t.(string)
		if !ok {
			return nil
		}
		ent.Upstream = v
		return nil
	},
	"Elapsed": func(t json.Token, ent *logEntry) error {
		v, ok := t.(string)
		if !ok {
			return nil
		}
		i, err := strconv.Atoi(v)
		if err != nil {
			return err
		}
		ent.Elapsed = time.Duration(i)
		return nil
	},
	"Question": func(t json.Token, ent *logEntry) error {
		v, ok := t.(string)
		if !ok {
			return nil
		}
		var qstr []byte
		qstr, err := base64.StdEncoding.DecodeString(v)
		if err != nil {
			return err
		}
		q := new(dns.Msg)
		err = q.Unpack(qstr)
		if err != nil {
			return err
		}
		ent.QHost = q.Question[0].Name
		if len(ent.QHost) == 0 {
			return nil // nil???
		}
		ent.QHost = ent.QHost[:len(ent.QHost)-1]
		ent.QType = dns.TypeToString[q.Question[0].Qtype]
		ent.QClass = dns.ClassToString[q.Question[0].Qclass]
		return nil
	},
	"Time": func(t json.Token, ent *logEntry) error {
		v, ok := t.(string)
		if !ok {
			return nil
		}
		var err error
		ent.Time, err = time.Parse(time.RFC3339, v)
		return err
	},
}

func decodeLogEntry(ent *logEntry, str string) {
	var b bool
	var i int
	var err error
	for {
		k, v, t := readJSON(&str)
		if t == jsonTErr {
			break
		}
		switch k {
		case "IP":
			if len(ent.IP) == 0 {
				ent.IP = v
			}
		case "T":
			ent.Time, err = time.Parse(time.RFC3339, v)

		case "QH":
			ent.QHost = v
		case "QT":
			ent.QType = v
		case "QC":
			ent.QClass = v

		case "CP":
			ent.ClientProto, err = NewClientProto(v)

		case "Answer":
			ent.Answer, err = base64.StdEncoding.DecodeString(v)
		case "OrigAnswer":
			ent.OrigAnswer, err = base64.StdEncoding.DecodeString(v)

		case "IsFiltered":
			b, err = strconv.ParseBool(v)
			ent.Result.IsFiltered = b
		case "Rule":
			ent.Result.Rule = v
		case "FilterID":
			i, err = strconv.Atoi(v)
			ent.Result.FilterID = int64(i)
		case "Reason":
			i, err = strconv.Atoi(v)
			ent.Result.Reason = dnsfilter.Reason(i)
		case "ServiceName":
			ent.Result.ServiceName = v

		case "Upstream":
			ent.Upstream = v
		case "Elapsed":
			i, err = strconv.Atoi(v)
			ent.Elapsed = time.Duration(i)

		// pre-v0.99.3 compatibility:
		case "Question":
			var qstr []byte
			qstr, err = base64.StdEncoding.DecodeString(v)
			if err != nil {
				break
			}
			q := new(dns.Msg)
			err = q.Unpack(qstr)
			if err != nil {
				break
			}
			ent.QHost = q.Question[0].Name
			if len(ent.QHost) == 0 {
				break
			}
			ent.QHost = ent.QHost[:len(ent.QHost)-1]
			ent.QType = dns.TypeToString[q.Question[0].Qtype]
			ent.QClass = dns.ClassToString[q.Question[0].Qclass]
		case "Time":
			ent.Time, err = time.Parse(time.RFC3339, v)
		}

	dec := json.NewDecoder(strings.NewReader(str))
	for dec.More() {
		keyToken, err := dec.Token()
		if err != nil {
			log.Debug("decodeLogEntry err: %s", err)
			break
			return
		}
		if _, ok := keyToken.(json.Delim); ok {
			continue
		}
		key := keyToken.(string)
		handler, ok := logEntryHandlers[key]
		value, err := dec.Token()
		if err != nil {
			return
		}
		if ok {
			if err := handler(value, ent); err != nil {
				log.Debug("decodeLogEntry err: %s", err)
				return
			}
		}
	}
}
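
The new decodeLogEntry above walks a single JSON object with encoding/json's streaming Decoder and dispatches each key to the handler registered in logEntryHandlers. The following is a minimal, self-contained sketch of that same token-and-handler pattern (hypothetical keys and types, not the actual querylog format):

	package main

	import (
		"encoding/json"
		"fmt"
		"strings"
	)

	type entry struct{ Host string }

	var handlers = map[string]func(t json.Token, e *entry) error{
		"QH": func(t json.Token, e *entry) error {
			if v, ok := t.(string); ok {
				e.Host = v
			}
			return nil
		},
	}

	func main() {
		e := &entry{}
		dec := json.NewDecoder(strings.NewReader(`{"QH":"example.org","QT":"A"}`))
		for dec.More() {
			tok, err := dec.Token()
			if err != nil {
				break
			}
			if _, ok := tok.(json.Delim); ok {
				continue // skip the opening and closing braces
			}
			key, _ := tok.(string)
			val, err := dec.Token()
			if err != nil {
				break
			}
			// keys without a handler ("QT" here) are simply skipped
			if h, ok := handlers[key]; ok {
				_ = h(val, e)
			}
		}
		fmt.Println(e.Host) // example.org
	}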

@@ -109,72 +230,3 @@ func readJSONValue(s, name string) string {

	end := start + i
	return s[start:end]
}

const (
	jsonTErr = iota
	jsonTObj
	jsonTStr
	jsonTNum
	jsonTBool
)

// Parse JSON key-value pair
// e.g.: "key":VALUE where VALUE is "string", true|false (boolean), or 123.456 (number)
// Note the limitations:
// . doesn't support whitespace
// . doesn't support "null"
// . doesn't validate boolean or number
// . no proper handling of {} braces
// . no handling of [] brackets
// Return (key, value, type)
func readJSON(ps *string) (string, string, int32) {
	s := *ps
	k := ""
	v := ""
	t := int32(jsonTErr)

	q1 := strings.IndexByte(s, '"')
	if q1 == -1 {
		return k, v, t
	}
	q2 := strings.IndexByte(s[q1+1:], '"')
	if q2 == -1 {
		return k, v, t
	}
	k = s[q1+1 : q1+1+q2]
	s = s[q1+1+q2+1:]

	if len(s) < 2 || s[0] != ':' {
		return k, v, t
	}

	if s[1] == '"' {
		q2 = strings.IndexByte(s[2:], '"')
		if q2 == -1 {
			return k, v, t
		}
		v = s[2 : 2+q2]
		t = jsonTStr
		s = s[2+q2+1:]

	} else if s[1] == '{' {
		t = jsonTObj
		s = s[1+1:]

	} else {
		sep := strings.IndexAny(s[1:], ",}")
		if sep == -1 {
			return k, v, t
		}
		v = s[1 : 1+sep]
		if s[1] == 't' || s[1] == 'f' {
			t = jsonTBool
		} else if s[1] == '.' || (s[1] >= '0' && s[1] <= '9') {
			t = jsonTNum
		}
		s = s[1+sep+1:]
	}

	*ps = s
	return k, v, t
}

@@ -48,30 +48,3 @@ func TestDecode_decodeQueryLog(t *testing.T) {

		})
	}
}

func TestJSON(t *testing.T) {
	s := `
{"keystr":"val","obj":{"keybool":true,"keyint":123456}}
	`
	k, v, jtype := readJSON(&s)
	assert.Equal(t, jtype, int32(jsonTStr))
	assert.Equal(t, "keystr", k)
	assert.Equal(t, "val", v)

	k, _, jtype = readJSON(&s)
	assert.Equal(t, jtype, int32(jsonTObj))
	assert.Equal(t, "obj", k)

	k, v, jtype = readJSON(&s)
	assert.Equal(t, jtype, int32(jsonTBool))
	assert.Equal(t, "keybool", k)
	assert.Equal(t, "true", v)

	k, v, jtype = readJSON(&s)
	assert.Equal(t, jtype, int32(jsonTNum))
	assert.Equal(t, "keyint", k)
	assert.Equal(t, "123456", v)

	_, _, jtype = readJSON(&s)
	assert.True(t, jtype == jsonTErr)
}

@@ -77,66 +77,79 @@ func (c *searchCriteria) quickMatchJSONValue(line string, propertyName string) b

}

// match - checks if the log entry matches this search criteria
// nolint (gocyclo)
func (c *searchCriteria) match(entry *logEntry) bool {
	switch c.criteriaType {
	case ctDomainOrClient:
		qhost := strings.ToLower(entry.QHost)
		searchVal := strings.ToLower(c.value)
		if c.strict && qhost == searchVal {
			return true
		}
		if !c.strict && strings.Contains(qhost, searchVal) {
			return true
		}

		if c.strict && entry.IP == c.value {
			return true
		}
		if !c.strict && strings.Contains(entry.IP, c.value) {
			return true
		}

		return false

		return c.ctDomainOrClientCase(entry)
	case ctFilteringStatus:
		res := entry.Result

		switch c.value {
		case filteringStatusAll:
			return true
		case filteringStatusFiltered:
			return res.IsFiltered ||
				res.Reason == dnsfilter.NotFilteredWhiteList ||
				res.Reason == dnsfilter.ReasonRewrite ||
				res.Reason == dnsfilter.RewriteEtcHosts
		case filteringStatusBlocked:
			return res.IsFiltered &&
				(res.Reason == dnsfilter.FilteredBlackList ||
					res.Reason == dnsfilter.FilteredBlockedService)
		case filteringStatusBlockedService:
			return res.IsFiltered && res.Reason == dnsfilter.FilteredBlockedService
		case filteringStatusBlockedParental:
			return res.IsFiltered && res.Reason == dnsfilter.FilteredParental
		case filteringStatusBlockedSafebrowsing:
			return res.IsFiltered && res.Reason == dnsfilter.FilteredSafeBrowsing
		case filteringStatusWhitelisted:
			return res.Reason == dnsfilter.NotFilteredWhiteList
		case filteringStatusRewritten:
			return res.Reason == dnsfilter.ReasonRewrite ||
				res.Reason == dnsfilter.RewriteEtcHosts
		case filteringStatusSafeSearch:
			return res.IsFiltered && res.Reason == dnsfilter.FilteredSafeSearch

		case filteringStatusProcessed:
			return !(res.Reason == dnsfilter.FilteredBlackList ||
				res.Reason == dnsfilter.FilteredBlockedService ||
				res.Reason == dnsfilter.NotFilteredWhiteList)

		default:
			return false
		}
		return c.ctFilteringStatusCase(entry.Result)
	}

	return false
}

func (c *searchCriteria) ctDomainOrClientCase(entry *logEntry) bool {
	qhost := strings.ToLower(entry.QHost)
	searchVal := strings.ToLower(c.value)
	if c.strict && qhost == searchVal {
		return true
	}
	if !c.strict && strings.Contains(qhost, searchVal) {
		return true
	}

	if c.strict && entry.IP == c.value {
		return true
	}
	if !c.strict && strings.Contains(entry.IP, c.value) {
		return true
	}
	return false
}

func (c *searchCriteria) ctFilteringStatusCase(res dnsfilter.Result) bool {
	switch c.value {
	case filteringStatusAll:
		return true

	case filteringStatusFiltered:
		return res.IsFiltered ||
			res.Reason.In(
				dnsfilter.NotFilteredWhiteList,
				dnsfilter.ReasonRewrite,
				dnsfilter.RewriteEtcHosts,
			)

	case filteringStatusBlocked:
		return res.IsFiltered &&
			res.Reason.In(dnsfilter.FilteredBlackList, dnsfilter.FilteredBlockedService)

	case filteringStatusBlockedService:
		return res.IsFiltered && res.Reason == dnsfilter.FilteredBlockedService

	case filteringStatusBlockedParental:
		return res.IsFiltered && res.Reason == dnsfilter.FilteredParental

	case filteringStatusBlockedSafebrowsing:
		return res.IsFiltered && res.Reason == dnsfilter.FilteredSafeBrowsing

	case filteringStatusWhitelisted:
		return res.Reason == dnsfilter.NotFilteredWhiteList

	case filteringStatusRewritten:
		return res.Reason.In(dnsfilter.ReasonRewrite, dnsfilter.RewriteEtcHosts)

	case filteringStatusSafeSearch:
		return res.IsFiltered && res.Reason == dnsfilter.FilteredSafeSearch

	case filteringStatusProcessed:
		return !res.Reason.In(
			dnsfilter.FilteredBlackList,
			dnsfilter.FilteredBlockedService,
			dnsfilter.NotFilteredWhiteList,
		)

	default:
		return false
	}
}
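
For orientation, a hypothetical in-package caller of the refactored matcher might look like this (a sketch only; a real searchCriteria value comes from the parsed search request, not a literal):

	c := searchCriteria{
		criteriaType: ctDomainOrClient,
		value:        "example.org",
		strict:       false,
	}
	entry := &logEntry{QHost: "ads.example.org", IP: "192.168.1.2"}

	// Non-strict search: substring match against the queried host or the client IP.
	fmt.Println(c.match(entry)) // true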

@@ -548,7 +548,6 @@ func (s *statsCtx) loadUnits(limit uint32) ([]*unitDB, uint32) {

 * parental-blocked
 These values are just the sum of data for all units.
 */
// nolint (gocyclo)
func (s *statsCtx) getData() map[string]interface{} {
	limit := s.conf.limit

@@ -564,137 +563,63 @@ func (s *statsCtx) getData() map[string]interface{} {

	}

	// per time unit counters:

	// 720 hours may span 31 days, so we skip data for the first day in this case
	firstDayID := (firstID + 24 - 1) / 24 * 24 // align_ceil(24)

	a := []uint64{}
	if timeUnit == Hours {
		for _, u := range units {
			a = append(a, u.NTotal)
		}
	} else {
		var sum uint64
		id := firstDayID
		nextDayID := firstDayID + 24
		for i := firstDayID - firstID; int(i) != len(units); i++ {
			sum += units[i].NTotal
			if id == nextDayID {
				a = append(a, sum)
				sum = 0
				nextDayID += 24
	statsCollector := func(numsGetter func(u *unitDB) (num uint64)) (nums []uint64) {
		if timeUnit == Hours {
			for _, u := range units {
				nums = append(nums, numsGetter(u))
			}
			id++
		}
		if id <= nextDayID {
			a = append(a, sum)
		}
		if len(a) != int(limit/24) {
			log.Fatalf("len(a) != limit: %d %d", len(a), limit)
		}
	}
	d["dns_queries"] = a

	a = []uint64{}
	if timeUnit == Hours {
		for _, u := range units {
			a = append(a, u.NResult[RFiltered])
		}
	} else {
		var sum uint64
		id := firstDayID
		nextDayID := firstDayID + 24
		for i := firstDayID - firstID; int(i) != len(units); i++ {
			sum += units[i].NResult[RFiltered]
			if id == nextDayID {
				a = append(a, sum)
				sum = 0
				nextDayID += 24
		} else {
			var sum uint64
			id := firstDayID
			nextDayID := firstDayID + 24
			for i := int(firstDayID - firstID); i != len(units); i++ {
				sum += numsGetter(units[i])
				if id == nextDayID {
					nums = append(nums, sum)
					sum = 0
					nextDayID += 24
				}
				id++
			}
			id++
		}
		if id <= nextDayID {
			a = append(a, sum)
		}
	}
	d["blocked_filtering"] = a

	a = []uint64{}
	if timeUnit == Hours {
		for _, u := range units {
			a = append(a, u.NResult[RSafeBrowsing])
		}
	} else {
		var sum uint64
		id := firstDayID
		nextDayID := firstDayID + 24
		for i := firstDayID - firstID; int(i) != len(units); i++ {
			sum += units[i].NResult[RSafeBrowsing]
			if id == nextDayID {
				a = append(a, sum)
				sum = 0
				nextDayID += 24
			if id <= nextDayID {
				nums = append(nums, sum)
			}
			id++
		}
		if id <= nextDayID {
			a = append(a, sum)
		}
		return nums
	}
	d["replaced_safebrowsing"] = a

	a = []uint64{}
	if timeUnit == Hours {
	topsCollector := func(max int, pairsGetter func(u *unitDB) (pairs []countPair)) []map[string]uint64 {
		m := map[string]uint64{}
		for _, u := range units {
			a = append(a, u.NResult[RParental])
		}
	} else {
		var sum uint64
		id := firstDayID
		nextDayID := firstDayID + 24
		for i := firstDayID - firstID; int(i) != len(units); i++ {
			sum += units[i].NResult[RParental]
			if id == nextDayID {
				a = append(a, sum)
				sum = 0
				nextDayID += 24
			for _, it := range pairsGetter(u) {
				m[it.Name] += it.Count
			}
			id++
		}
		if id <= nextDayID {
			a = append(a, sum)
		}
		a2 := convertMapToArray(m, max)
		return convertTopArray(a2)
	}
	d["replaced_parental"] = a

	// top counters:

	m := map[string]uint64{}
	for _, u := range units {
		for _, it := range u.Domains {
			m[it.Name] += it.Count
		}
	dnsQueries := statsCollector(func(u *unitDB) (num uint64) { return u.NTotal })
	if timeUnit != Hours && len(dnsQueries) != int(limit/24) {
		log.Fatalf("len(dnsQueries) != limit: %d %d", len(dnsQueries), limit)
	}
	a2 := convertMapToArray(m, maxDomains)
	d["top_queried_domains"] = convertTopArray(a2)

	m = map[string]uint64{}
	for _, u := range units {
		for _, it := range u.BlockedDomains {
			m[it.Name] += it.Count
		}
	statsData := map[string]interface{}{
		"dns_queries":           dnsQueries,
		"blocked_filtering":     statsCollector(func(u *unitDB) (num uint64) { return u.NResult[RFiltered] }),
		"replaced_safebrowsing": statsCollector(func(u *unitDB) (num uint64) { return u.NResult[RSafeBrowsing] }),
		"replaced_parental":     statsCollector(func(u *unitDB) (num uint64) { return u.NResult[RParental] }),
		"top_queried_domains":   topsCollector(maxDomains, func(u *unitDB) (pairs []countPair) { return u.Domains }),
		"top_blocked_domains":   topsCollector(maxDomains, func(u *unitDB) (pairs []countPair) { return u.BlockedDomains }),
		"top_clients":           topsCollector(maxClients, func(u *unitDB) (pairs []countPair) { return u.Clients }),
	}
	a2 = convertMapToArray(m, maxDomains)
	d["top_blocked_domains"] = convertTopArray(a2)

	m = map[string]uint64{}
	for _, u := range units {
		for _, it := range u.Clients {
			m[it.Name] += it.Count
		}
	for dataKey, dataValue := range statsData {
		d[dataKey] = dataValue
	}
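
statsCollector above folds the per-hour unit counters either straight through (hour view) or into per-day sums. The day-bucketing part of that closure, shown in isolation, is just this (a standalone illustration, not the project's code):

	hours := make([]uint64, 48) // two days of hourly counters
	for i := range hours {
		hours[i] = 1
	}

	days := []uint64{}
	var sum uint64
	for i, n := range hours {
		sum += n
		if (i+1)%24 == 0 { // close a full day
			days = append(days, sum)
			sum = 0
		}
	}
	// days is now [24 24]
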
	a2 = convertMapToArray(m, maxClients)
	d["top_clients"] = convertTopArray(a2)

	// total counters: