Pull request 1910: new-rulelist-parser
Squashed commit of the following:

commit bac0da6818388d67840b8fe9b633ce0804964ed9
Merge: cb6759b63 f7dd83251
Author: Ainar Garipov <A.Garipov@AdGuard.COM>
Date:   Fri Jul 7 18:22:40 2023 +0300

    Merge branch 'master' into new-rulelist-parser

commit cb6759b63546b35074ec0ae04769ddb5e83ebac1
Author: Ainar Garipov <A.Garipov@AdGuard.COM>
Date:   Fri Jul 7 12:18:44 2023 +0300

    all: upd tools

commit d28bf4cb42057b84e0c1325389db121a91f7c989
Author: Ainar Garipov <A.Garipov@AdGuard.COM>
Date:   Thu Jul 6 19:35:48 2023 +0300

    all: upd chlog

commit 7df637b00331dff5810c3a76f4a7d2cee24148f1
Author: Ainar Garipov <A.Garipov@AdGuard.COM>
Date:   Thu Jul 6 19:30:42 2023 +0300

    rulelist: fix tabs

commit 0598d0d43504b246570e9ee76d79dff0d86413c5
Author: Ainar Garipov <A.Garipov@AdGuard.COM>
Date:   Thu Jul 6 19:06:18 2023 +0300

    all: add go-bench, go-fuzz; imp docs

commit a9ab7726048e216b13876a85991f3e3e8696a029
Author: Ainar Garipov <A.Garipov@AdGuard.COM>
Date:   Thu Jul 6 18:18:14 2023 +0300

    filtering: add new parser

This commit is contained in:
  parent f7dd832517
  commit 7b92d53b84

21 changed files with 759 additions and 259 deletions
.gitignore (vendored) | 1

@@ -9,6 +9,7 @@
*.db
*.log
*.snap
*.test
/agh-backup/
/bin/
/build/*

@@ -25,6 +25,8 @@ NOTE: Add new changes BELOW THIS COMMENT.

### Changed

- Improved CPU and RAM consumption during updates of filtering-rule lists.

#### Configuration Changes

In this release, the schema version has changed from 23 to 24.
Makefile | 4

@@ -78,7 +78,7 @@ build: deps quick-build
quick-build: js-build go-build

ci: deps test
ci: deps test go-bench go-fuzz

deps: js-deps go-deps
lint: js-lint go-lint

@@ -104,8 +104,10 @@ js-deps:
js-lint: ; $(NPM) $(NPM_FLAGS) run lint
js-test: ; $(NPM) $(NPM_FLAGS) run test

go-bench: ; $(ENV) "$(SHELL)" ./scripts/make/go-bench.sh
go-build: ; $(ENV) "$(SHELL)" ./scripts/make/go-build.sh
go-deps: ; $(ENV) "$(SHELL)" ./scripts/make/go-deps.sh
go-fuzz: ; $(ENV) "$(SHELL)" ./scripts/make/go-fuzz.sh
go-lint: ; $(ENV) "$(SHELL)" ./scripts/make/go-lint.sh
go-tools: ; $(ENV) "$(SHELL)" ./scripts/make/go-tools.sh
@@ -2,8 +2,8 @@ package aghtest
import (
    "context"
    "io"
    "io/fs"
    "net"

    "github.com/AdguardTeam/AdGuardHome/internal/aghos"
    "github.com/AdguardTeam/AdGuardHome/internal/next/agh"

@@ -19,23 +19,23 @@ import (
// Package fs

// type check
var _ fs.FS = &FS{}

// FS is a mock [fs.FS] implementation for tests.
// FS is a fake [fs.FS] implementation for tests.
type FS struct {
    OnOpen func(name string) (fs.File, error)
}

// type check
var _ fs.FS = (*FS)(nil)

// Open implements the [fs.FS] interface for *FS.
func (fsys *FS) Open(name string) (fs.File, error) {
    return fsys.OnOpen(name)
}

// type check
var _ fs.GlobFS = &GlobFS{}
var _ fs.GlobFS = (*GlobFS)(nil)

// GlobFS is a mock [fs.GlobFS] implementation for tests.
// GlobFS is a fake [fs.GlobFS] implementation for tests.
type GlobFS struct {
    // FS is embedded here to avoid implementing all it's methods.
    FS

@@ -48,9 +48,9 @@ func (fsys *GlobFS) Glob(pattern string) ([]string, error) {
}

// type check
var _ fs.StatFS = &StatFS{}
var _ fs.StatFS = (*StatFS)(nil)

// StatFS is a mock [fs.StatFS] implementation for tests.
// StatFS is a fake [fs.StatFS] implementation for tests.
type StatFS struct {
    // FS is embedded here to avoid implementing all it's methods.
    FS

@@ -62,47 +62,34 @@ func (fsys *StatFS) Stat(name string) (fs.FileInfo, error) {
    return fsys.OnStat(name)
}

// Package net
// Package io

// type check
var _ net.Listener = (*Listener)(nil)

// Listener is a mock [net.Listener] implementation for tests.
type Listener struct {
    OnAccept func() (conn net.Conn, err error)
    OnAddr func() (addr net.Addr)
    OnClose func() (err error)
// Writer is a fake [io.Writer] implementation for tests.
type Writer struct {
    OnWrite func(b []byte) (n int, err error)
}

// Accept implements the [net.Listener] interface for *Listener.
func (l *Listener) Accept() (conn net.Conn, err error) {
    return l.OnAccept()
}
var _ io.Writer = (*Writer)(nil)

// Addr implements the [net.Listener] interface for *Listener.
func (l *Listener) Addr() (addr net.Addr) {
    return l.OnAddr()
}

// Close implements the [net.Listener] interface for *Listener.
func (l *Listener) Close() (err error) {
    return l.OnClose()
// Write implements the [io.Writer] interface for *Writer.
func (w *Writer) Write(b []byte) (n int, err error) {
    return w.OnWrite(b)
}

// Module adguard-home

// Package aghos

// type check
var _ aghos.FSWatcher = (*FSWatcher)(nil)

// FSWatcher is a mock [aghos.FSWatcher] implementation for tests.
// FSWatcher is a fake [aghos.FSWatcher] implementation for tests.
type FSWatcher struct {
    OnEvents func() (e <-chan struct{})
    OnAdd func(name string) (err error)
    OnClose func() (err error)
}

// type check
var _ aghos.FSWatcher = (*FSWatcher)(nil)

// Events implements the [aghos.FSWatcher] interface for *FSWatcher.
func (w *FSWatcher) Events() (e <-chan struct{}) {
    return w.OnEvents()

@@ -120,16 +107,16 @@ func (w *FSWatcher) Close() (err error) {
// Package agh

// type check
var _ agh.ServiceWithConfig[struct{}] = (*ServiceWithConfig[struct{}])(nil)

// ServiceWithConfig is a mock [agh.ServiceWithConfig] implementation for tests.
// ServiceWithConfig is a fake [agh.ServiceWithConfig] implementation for tests.
type ServiceWithConfig[ConfigType any] struct {
    OnStart func() (err error)
    OnShutdown func(ctx context.Context) (err error)
    OnConfig func() (c ConfigType)
}

// type check
var _ agh.ServiceWithConfig[struct{}] = (*ServiceWithConfig[struct{}])(nil)

// Start implements the [agh.ServiceWithConfig] interface for
// *ServiceWithConfig.
func (s *ServiceWithConfig[_]) Start() (err error) {

@@ -152,10 +139,7 @@ func (s *ServiceWithConfig[ConfigType]) Config() (c ConfigType) {
// Package upstream

// type check
var _ upstream.Upstream = (*UpstreamMock)(nil)

// UpstreamMock is a mock [upstream.Upstream] implementation for tests.
// UpstreamMock is a fake [upstream.Upstream] implementation for tests.
//
// TODO(a.garipov): Replace with all uses of Upstream with UpstreamMock and
// rename it to just Upstream.

@@ -165,6 +149,9 @@ type UpstreamMock struct {
    OnClose func() (err error)
}

// type check
var _ upstream.Upstream = (*UpstreamMock)(nil)

// Address implements the [upstream.Upstream] interface for *UpstreamMock.
func (u *UpstreamMock) Address() (addr string) {
    return u.OnAddress()
@@ -1,10 +1,7 @@
package filtering

import (
    "bufio"
    "bytes"
    "fmt"
    "hash/crc32"
    "io"
    "net/http"
    "os"

@@ -14,6 +11,7 @@ import (
    "time"

    "github.com/AdguardTeam/AdGuardHome/internal/aghalg"
    "github.com/AdguardTeam/AdGuardHome/internal/filtering/rulelist"
    "github.com/AdguardTeam/golibs/errors"
    "github.com/AdguardTeam/golibs/log"
    "github.com/AdguardTeam/golibs/stringutil"

@@ -29,9 +27,9 @@ const filterDir = "filters"
// TODO(e.burkov): Use more deterministic approach.
var nextFilterID = time.Now().Unix()

// FilterYAML respresents a filter list in the configuration file.
// FilterYAML represents a filter list in the configuration file.
//
// TODO(e.burkov): Investigate if the field oredering is important.
// TODO(e.burkov): Investigate if the field ordering is important.
type FilterYAML struct {
    Enabled bool
    URL string // URL or a file path

@@ -213,7 +211,7 @@ func (d *DNSFilter) loadFilters(array []FilterYAML) {
        err := d.load(filter)
        if err != nil {
            log.Error("Couldn't load filter %d contents due to %s", filter.ID, err)
            log.Error("filtering: loading filter %d: %s", filter.ID, err)
        }
    }
}

@@ -338,7 +336,8 @@ func (d *DNSFilter) refreshFiltersArray(filters *[]FilterYAML, force bool) (int,
        updateFlags = append(updateFlags, updated)
        if err != nil {
            nfail++
            log.Printf("Failed to update filter %s: %s\n", uf.URL, err)
            log.Info("filtering: updating filter from url %q: %s\n", uf.URL, err)

            continue
        }
    }

@@ -367,7 +366,13 @@ func (d *DNSFilter) refreshFiltersArray(filters *[]FilterYAML, force bool) (int,
            continue
        }

        log.Info("Updated filter #%d. Rules: %d -> %d", f.ID, f.RulesCount, uf.RulesCount)
        log.Info(
            "filtering: updated filter %d; rule count: %d (was %d)",
            f.ID,
            uf.RulesCount,
            f.RulesCount,
        )

        f.Name = uf.Name
        f.RulesCount = uf.RulesCount
        f.checksum = uf.checksum

@@ -397,9 +402,10 @@ func (d *DNSFilter) refreshFiltersArray(filters *[]FilterYAML, force bool) (int,
//
// TODO(a.garipov, e.burkov): What the hell?
func (d *DNSFilter) refreshFiltersIntl(block, allow, force bool) (int, bool) {
    log.Debug("filtering: updating...")

    updNum := 0
    log.Debug("filtering: starting updating")
    defer func() { log.Debug("filtering: finished updating, %d updated", updNum) }()

    var lists []FilterYAML
    var toUpd []bool
    isNetErr := false

@@ -437,131 +443,9 @@ func (d *DNSFilter) refreshFiltersIntl(block, allow, force bool) (int, bool) {
        }
    }

    log.Debug("filtering: update finished: %d lists updated", updNum)

    return updNum, false
}

// isPrintableText returns true if data is printable UTF-8 text with CR, LF, TAB
// characters.
//
// TODO(e.burkov): Investigate the purpose of this and improve the
// implementation. Perhaps, use something from the unicode package.
func isPrintableText(data string) (ok bool) {
    for _, c := range []byte(data) {
        if (c >= ' ' && c != 0x7f) || c == '\n' || c == '\r' || c == '\t' {
            continue
        }

        return false
    }

    return true
}

// scanLinesWithBreak is essentially a [bufio.ScanLines] which keeps trailing
// line breaks.
func scanLinesWithBreak(data []byte, atEOF bool) (advance int, token []byte, err error) {
    if atEOF && len(data) == 0 {
        return 0, nil, nil
    }

    if i := bytes.IndexByte(data, '\n'); i >= 0 {
        return i + 1, data[0 : i+1], nil
    }

    if atEOF {
        return len(data), data, nil
    }

    // Request more data.
    return 0, nil, nil
}

// parseFilter copies filter's content from src to dst and returns the number of
// rules, number of bytes written, checksum, and title of the parsed list. dst
// must not be nil.
func (d *DNSFilter) parseFilter(
    src io.Reader,
    dst io.Writer,
) (rulesNum, written int, checksum uint32, title string, err error) {
    scanner := bufio.NewScanner(src)
    scanner.Split(scanLinesWithBreak)

    titleFound := false
    for n := 0; scanner.Scan(); written += n {
        line := scanner.Text()
        var isRule bool
        var likelyTitle string
        isRule, likelyTitle, err = d.parseFilterLine(line, !titleFound, written == 0)
        if err != nil {
            return 0, written, 0, "", err
        }

        if isRule {
            rulesNum++
        } else if likelyTitle != "" {
            title, titleFound = likelyTitle, true
        }

        checksum = crc32.Update(checksum, crc32.IEEETable, []byte(line))

        n, err = dst.Write([]byte(line))
        if err != nil {
            return 0, written, 0, "", fmt.Errorf("writing filter line: %w", err)
        }
    }

    if err = scanner.Err(); err != nil {
        return 0, written, 0, "", fmt.Errorf("scanning filter contents: %w", err)
    }

    return rulesNum, written, checksum, title, nil
}

// parseFilterLine returns true if the passed line is a rule. line is
// considered a rule if it's not a comment and contains no title.
func (d *DNSFilter) parseFilterLine(
    line string,
    lookForTitle bool,
    testHTML bool,
) (isRule bool, title string, err error) {
    if !isPrintableText(line) {
        return false, "", errors.Error("filter contains non-printable characters")
    }

    line = strings.TrimSpace(line)
    if line == "" || line[0] == '#' {
        return false, "", nil
    }

    if testHTML && isHTML(line) {
        return false, "", errors.Error("data is HTML, not plain text")
    }

    if line[0] == '!' && lookForTitle {
        match := d.filterTitleRegexp.FindStringSubmatch(line)
        if len(match) > 1 {
            title = match[1]
        }

        return false, title, nil
    }

    return true, "", nil
}

// isHTML returns true if the line contains HTML tags instead of plain text.
// line shouldn have no leading space symbols.
//
// TODO(ameshkov): It actually gives too much false-positives. Perhaps, just
// check if trimmed string begins with angle bracket.
func isHTML(line string) (ok bool) {
    line = strings.ToLower(line)

    return strings.HasPrefix(line, "<html") || strings.HasPrefix(line, "<!doctype")
}

// update refreshes filter's content and a/mtimes of it's file.
func (d *DNSFilter) update(filter *FilterYAML) (b bool, err error) {
    b, err = d.updateIntl(filter)

@@ -573,7 +457,7 @@ func (d *DNSFilter) update(filter *FilterYAML) (b bool, err error) {
        filter.LastUpdated,
    )
    if chErr != nil {
        log.Error("os.Chtimes(): %v", chErr)
        log.Error("filtering: os.Chtimes(): %s", chErr)
    }
}

@@ -582,14 +466,12 @@ func (d *DNSFilter) update(filter *FilterYAML) (b bool, err error) {

// finalizeUpdate closes and gets rid of temporary file f with filter's content
// according to updated. It also saves new values of flt's name, rules number
// and checksum if sucсeeded.
// and checksum if succeeded.
func (d *DNSFilter) finalizeUpdate(
    file *os.File,
    flt *FilterYAML,
    updated bool,
    name string,
    rnum int,
    cs uint32,
    res *rulelist.ParseResult,
) (err error) {
    tmpFileName := file.Name()

@@ -602,23 +484,24 @@ func (d *DNSFilter) finalizeUpdate(
    }

    if !updated {
        log.Tracef("filter #%d from %s has no changes, skip", flt.ID, flt.URL)
        log.Debug("filtering: filter %d from url %q has no changes, skipping", flt.ID, flt.URL)

        return os.Remove(tmpFileName)
    }

    fltPath := flt.Path(d.DataDir)

    log.Printf("saving contents of filter #%d into %s", flt.ID, fltPath)
    log.Info("filtering: saving contents of filter %d into %q", flt.ID, fltPath)

    // Don't use renamio or maybe packages, since those will require loading the
    // whole filter content to the memory on Windows.
    // Don't use renameio or maybe packages, since those will require loading
    // the whole filter content to the memory on Windows.
    err = os.Rename(tmpFileName, fltPath)
    if err != nil {
        return errors.WithDeferred(err, os.Remove(tmpFileName))
    }

    flt.Name, flt.checksum, flt.RulesCount = aghalg.Coalesce(flt.Name, name), cs, rnum
    flt.Name = aghalg.Coalesce(flt.Name, res.Title)
    flt.checksum, flt.RulesCount = res.Checksum, res.RulesCount

    return nil
}

@@ -626,11 +509,9 @@ func (d *DNSFilter) finalizeUpdate(
// updateIntl updates the flt rewriting it's actual file. It returns true if
// the actual update has been performed.
func (d *DNSFilter) updateIntl(flt *FilterYAML) (ok bool, err error) {
    log.Tracef("downloading update for filter %d from %s", flt.ID, flt.URL)
    log.Debug("filtering: downloading update for filter %d from %q", flt.ID, flt.URL)

    var name string
    var rnum, n int
    var cs uint32
    var res *rulelist.ParseResult

    var tmpFile *os.File
    tmpFile, err = os.CreateTemp(filepath.Join(d.DataDir, filterDir), "")

@@ -638,9 +519,14 @@ func (d *DNSFilter) updateIntl(flt *FilterYAML) (ok bool, err error) {
        return false, err
    }
    defer func() {
        finErr := d.finalizeUpdate(tmpFile, flt, ok, name, rnum, cs)
        finErr := d.finalizeUpdate(tmpFile, flt, ok, res)
        if ok && finErr == nil {
            log.Printf("updated filter %d: %d bytes, %d rules", flt.ID, n, rnum)
            log.Info(
                "filtering: updated filter %d: %d bytes, %d rules",
                flt.ID,
                res.BytesWritten,
                res.RulesCount,
            )

            return
        }

@@ -661,14 +547,14 @@ func (d *DNSFilter) updateIntl(flt *FilterYAML) (ok bool, err error) {
    var resp *http.Response
    resp, err = d.HTTPClient.Get(flt.URL)
    if err != nil {
        log.Printf("requesting filter from %s, skip: %s", flt.URL, err)
        log.Info("filtering: requesting filter from %q: %s, skipping", flt.URL, err)

        return false, err
    }
    defer func() { err = errors.WithDeferred(err, resp.Body.Close()) }()

    if resp.StatusCode != http.StatusOK {
        log.Printf("got status code %d from %s, skip", resp.StatusCode, flt.URL)
        log.Info("filtering got status code %d from %q, skipping", resp.StatusCode, flt.URL)

        return false, fmt.Errorf("got status code %d, want %d", resp.StatusCode, http.StatusOK)
    }

@@ -685,16 +571,20 @@ func (d *DNSFilter) updateIntl(flt *FilterYAML) (ok bool, err error) {
        r = f
    }

    rnum, n, cs, name, err = d.parseFilter(r, tmpFile)
    bufPtr := d.bufPool.Get().(*[]byte)
    defer d.bufPool.Put(bufPtr)

    return cs != flt.checksum && err == nil, err
    p := rulelist.NewParser()
    res, err = p.Parse(tmpFile, r, *bufPtr)

    return res.Checksum != flt.checksum && err == nil, err
}

// loads filter contents from the file in dataDir
func (d *DNSFilter) load(flt *FilterYAML) (err error) {
    fileName := flt.Path(d.DataDir)

    log.Debug("filtering: loading filter %d from %s", flt.ID, fileName)
    log.Debug("filtering: loading filter %d from %q", flt.ID, fileName)

    file, err := os.Open(fileName)
    if errors.Is(err, os.ErrNotExist) {

@@ -710,14 +600,18 @@ func (d *DNSFilter) load(flt *FilterYAML) (err error) {
        return fmt.Errorf("getting filter file stat: %w", err)
    }

    log.Debug("filtering: file %s, id %d, length %d", fileName, flt.ID, st.Size())
    log.Debug("filtering: file %q, id %d, length %d", fileName, flt.ID, st.Size())

    rulesCount, _, checksum, _, err := d.parseFilter(file, io.Discard)
    bufPtr := d.bufPool.Get().(*[]byte)
    defer d.bufPool.Put(bufPtr)

    p := rulelist.NewParser()
    res, err := p.Parse(io.Discard, file, *bufPtr)
    if err != nil {
        return fmt.Errorf("parsing filter file: %w", err)
    }

    flt.RulesCount, flt.checksum, flt.LastUpdated = rulesCount, checksum, st.ModTime()
    flt.RulesCount, flt.checksum, flt.LastUpdated = res.RulesCount, res.Checksum, st.ModTime()

    return nil
}

@@ -759,8 +653,9 @@ func (d *DNSFilter) enableFiltersLocked(async bool) {
        })
    }

    if err := d.SetFilters(filters, allowFilters, async); err != nil {
        log.Debug("enabling filters: %s", err)
    err := d.setFilters(filters, allowFilters, async)
    if err != nil {
        log.Error("filtering: enabling filters: %s", err)
    }

    d.SetEnabled(d.FilteringEnabled)
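Editorial note: the hunks above replace DNSFilter.parseFilter with the new rulelist.Parser fed from a sync.Pool of reusable buffers. The sketch below is a minimal approximation of that shared pattern, not the exact AdGuard Home code; it assumes it is built inside the AdGuardHome module (rulelist is an internal package), and the names listBufPool and parseList are illustrative only.

package filtering

import (
    "io"
    "sync"

    "github.com/AdguardTeam/AdGuardHome/internal/filtering/rulelist"
)

// listBufPool mirrors the new DNSFilter.bufPool field: it hands out pointers
// to reusable byte slices sized to the longest allowed rule.
var listBufPool = &sync.Pool{
    New: func() (buf any) {
        bufVal := make([]byte, rulelist.MaxRuleLen)

        return &bufVal
    },
}

// parseList is an illustrative helper showing the pattern now used by both
// updateIntl and load: borrow a buffer, run a single-use parser, and return
// the buffer to the pool when done.
func parseList(dst io.Writer, src io.Reader) (res *rulelist.ParseResult, err error) {
    bufPtr := listBufPool.Get().(*[]byte)
    defer listBufPool.Put(bufPtr)

    p := rulelist.NewParser()

    return p.Parse(dst, src, *bufPtr)
}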
@@ -9,7 +9,6 @@ import (
    "net/http"
    "os"
    "path/filepath"
    "regexp"
    "runtime"
    "runtime/debug"
    "strings"

@@ -18,6 +17,7 @@ import (
    "github.com/AdguardTeam/AdGuardHome/internal/aghhttp"
    "github.com/AdguardTeam/AdGuardHome/internal/aghnet"
    "github.com/AdguardTeam/AdGuardHome/internal/filtering/rulelist"
    "github.com/AdguardTeam/golibs/errors"
    "github.com/AdguardTeam/golibs/log"
    "github.com/AdguardTeam/golibs/mathutil"

@@ -170,6 +170,15 @@ type Checker interface {
// DNSFilter matches hostnames and DNS requests against filtering rules.
type DNSFilter struct {
    // bufPool is a pool of buffers used for filtering-rule list parsing.
    bufPool *sync.Pool

    rulesStorage *filterlist.RuleStorage
    filteringEngine *urlfilter.DNSEngine

    rulesStorageAllow *filterlist.RuleStorage
    filteringEngineAllow *urlfilter.DNSEngine

    safeSearch SafeSearch

    // safeBrowsingChecker is the safe browsing hash-prefix checker.

@@ -178,12 +187,6 @@ type DNSFilter struct {
    // parentalControl is the parental control hash-prefix checker.
    parentalControlChecker Checker

    rulesStorage *filterlist.RuleStorage
    filteringEngine *urlfilter.DNSEngine

    rulesStorageAllow *filterlist.RuleStorage
    filteringEngineAllow *urlfilter.DNSEngine

    engineLock sync.RWMutex

    Config // for direct access by library users, even a = assignment

@@ -196,12 +199,6 @@ type DNSFilter struct {
    refreshLock *sync.Mutex

    // filterTitleRegexp is the regular expression to retrieve a name of a
    // filter list.
    //
    // TODO(e.burkov): Don't use regexp for such a simple text processing task.
    filterTitleRegexp *regexp.Regexp

    hostCheckers []hostChecker
}

@@ -339,12 +336,12 @@ func cloneRewrites(entries []*LegacyRewrite) (clone []*LegacyRewrite) {
    return clone
}

// SetFilters sets new filters, synchronously or asynchronously. When filters
// setFilters sets new filters, synchronously or asynchronously. When filters
// are set asynchronously, the old filters continue working until the new
// filters are ready.
//
// In this case the caller must ensure that the old filter files are intact.
func (d *DNSFilter) SetFilters(blockFilters, allowFilters []Filter, async bool) error {
func (d *DNSFilter) setFilters(blockFilters, allowFilters []Filter, async bool) error {
    if async {
        params := filtersInitializerParams{
            allowFilters: allowFilters,

@@ -370,14 +367,7 @@ func (d *DNSFilter) SetFilters(blockFilters, allowFilters []Filter, async bool)
        return nil
    }

    err := d.initFiltering(allowFilters, blockFilters)
    if err != nil {
        log.Error("filtering: can't initialize filtering subsystem: %s", err)

        return err
    }

    return nil
    return d.initFiltering(allowFilters, blockFilters)
}

// Starts initializing new filters by signal from channel

@@ -386,7 +376,8 @@ func (d *DNSFilter) filtersInitializer() {
        params := <-d.filtersInitializerChan
        err := d.initFiltering(params.allowFilters, params.blockFilters)
        if err != nil {
            log.Error("Can't initialize filtering subsystem: %s", err)
            log.Error("filtering: initializing: %s", err)

            continue
        }
    }

@@ -718,7 +709,7 @@ func newRuleStorage(filters []Filter) (rs *filterlist.RuleStorage, err error) {
}

// Initialize urlfilter objects.
func (d *DNSFilter) initFiltering(allowFilters, blockFilters []Filter) error {
func (d *DNSFilter) initFiltering(allowFilters, blockFilters []Filter) (err error) {
    rulesStorage, err := newRuleStorage(blockFilters)
    if err != nil {
        return err

@@ -745,7 +736,8 @@ func (d *DNSFilter) initFiltering(allowFilters, blockFilters []Filter) error {
    // Make sure that the OS reclaims memory as soon as possible.
    debug.FreeOSMemory()
    log.Debug("initialized filtering engine")

    log.Debug("filtering: initialized filtering engine")

    return nil
}

@@ -949,8 +941,14 @@ func InitModule() {
// be non-nil.
func New(c *Config, blockFilters []Filter) (d *DNSFilter, err error) {
    d = &DNSFilter{
        bufPool: &sync.Pool{
            New: func() (buf any) {
                bufVal := make([]byte, rulelist.MaxRuleLen)

                return &bufVal
            },
        },
        refreshLock: &sync.Mutex{},
        filterTitleRegexp: regexp.MustCompile(`^! Title: +(.*)$`),
        safeBrowsingChecker: c.SafeBrowsingChecker,
        parentalControlChecker: c.ParentalControlChecker,
    }

@@ -1047,7 +1045,7 @@ func (d *DNSFilter) checkSafeBrowsing(
    if log.GetLevel() >= log.DEBUG {
        timer := log.StartTimer()
        defer timer.LogElapsed("safebrowsing lookup for %q", host)
        defer timer.LogElapsed("filtering: safebrowsing lookup for %q", host)
    }

    res = Result{

@@ -1079,7 +1077,7 @@ func (d *DNSFilter) checkParental(
    if log.GetLevel() >= log.DEBUG {
        timer := log.StartTimer()
        defer timer.LogElapsed("parental lookup for %q", host)
        defer timer.LogElapsed("filtering: parental lookup for %q", host)
    }

    res = Result{
@@ -547,7 +547,7 @@ func TestWhitelist(t *testing.T) {
    }}
    d, setts := newForTest(t, nil, filters)

    err := d.SetFilters(filters, whiteFilters, false)
    err := d.setFilters(filters, whiteFilters, false)
    require.NoError(t, err)

    t.Cleanup(d.Close)
@@ -95,7 +95,7 @@ func (d *DNSFilter) handleFilteringAddURL(w http.ResponseWriter, r *http.Request
        r,
        w,
        http.StatusBadRequest,
        "Couldn't fetch filter from url %s: %s",
        "Couldn't fetch filter from URL %q: %s",
        filt.URL,
        err,
    )
@@ -122,7 +122,7 @@ func matchDomainWildcard(host, wildcard string) (ok bool) {
    return isWildcard(wildcard) && strings.HasSuffix(host, wildcard[1:])
}

// legacyRewriteSortsBefore sorts rewirtes according to the following priority:
// legacyRewriteSortsBefore sorts rewrites according to the following priority:
//
// 1. A and AAAA > CNAME;
// 2. wildcard > exact;
internal/filtering/rulelist/error.go (new file) | 9

@@ -0,0 +1,9 @@
package rulelist

import "github.com/AdguardTeam/golibs/errors"

// ErrHTML is returned by [Parser.Parse] if the data is likely to be HTML.
//
// TODO(a.garipov): This error is currently returned to the UI. Stop that and
// make it all-lowercase.
const ErrHTML errors.Error = "data is HTML, not plain text"
internal/filtering/rulelist/parser.go (new file) | 184

@@ -0,0 +1,184 @@
package rulelist

import (
    "bufio"
    "bytes"
    "fmt"
    "hash/crc32"
    "io"
    "unicode"

    "github.com/AdguardTeam/golibs/errors"
)

// Parser is a filtering-rule parser that collects data, such as the checksum
// and the title, as well as counts rules and removes comments.
type Parser struct {
    title string
    rulesCount int
    written int
    checksum uint32
    titleFound bool
}

// NewParser returns a new filtering-rule parser.
func NewParser() (p *Parser) {
    return &Parser{}
}

// ParseResult contains information about the results of parsing a
// filtering-rule list by [Parser.Parse].
type ParseResult struct {
    // Title is the title contained within the filtering-rule list, if any.
    Title string

    // RulesCount is the number of rules in the list. It excludes empty lines
    // and comments.
    RulesCount int

    // BytesWritten is the number of bytes written to dst.
    BytesWritten int

    // Checksum is the CRC-32 checksum of the rules content. That is, excluding
    // empty lines and comments.
    Checksum uint32
}

// Parse parses data from src into dst using buf during parsing. r is never
// nil.
func (p *Parser) Parse(dst io.Writer, src io.Reader, buf []byte) (r *ParseResult, err error) {
    s := bufio.NewScanner(src)
    s.Buffer(buf, MaxRuleLen)

    lineIdx := 0
    for s.Scan() {
        var n int
        n, err = p.processLine(dst, s.Bytes(), lineIdx)
        p.written += n
        if err != nil {
            // Don't wrap the error, because it's informative enough as is.
            return p.result(), err
        }

        lineIdx++
    }

    r = p.result()
    err = s.Err()

    return r, errors.Annotate(err, "scanning filter contents: %w")
}

// result returns the current parsing result.
func (p *Parser) result() (r *ParseResult) {
    return &ParseResult{
        Title: p.title,
        RulesCount: p.rulesCount,
        BytesWritten: p.written,
        Checksum: p.checksum,
    }
}

// processLine processes a single line. It may write to dst, and if it does, n
// is the number of bytes written.
func (p *Parser) processLine(dst io.Writer, line []byte, lineIdx int) (n int, err error) {
    trimmed := bytes.TrimSpace(line)
    if p.written == 0 && isHTMLLine(trimmed) {
        return 0, ErrHTML
    }

    badIdx, isRule := 0, false
    if p.titleFound {
        badIdx, isRule = parseLine(trimmed)
    } else {
        badIdx, isRule = p.parseLineTitle(trimmed)
    }
    if badIdx != -1 {
        return 0, fmt.Errorf(
            "line at index %d: character at index %d: non-printable character",
            lineIdx,
            badIdx+bytes.Index(line, trimmed),
        )
    }

    if !isRule {
        return 0, nil
    }

    p.rulesCount++
    p.checksum = crc32.Update(p.checksum, crc32.IEEETable, trimmed)

    // Assume that there is generally enough space in the buffer to add a
    // newline.
    n, err = dst.Write(append(trimmed, '\n'))

    return n, errors.Annotate(err, "writing rule line: %w")
}

// isHTMLLine returns true if line is likely an HTML line. line is assumed to
// be trimmed of whitespace characters.
func isHTMLLine(line []byte) (isHTML bool) {
    return hasPrefixFold(line, []byte("<html")) || hasPrefixFold(line, []byte("<!doctype"))
}

// hasPrefixFold is a simple, best-effort prefix matcher. It may return
// incorrect results for some non-ASCII characters.
func hasPrefixFold(b, prefix []byte) (ok bool) {
    l := len(prefix)

    return len(b) >= l && bytes.EqualFold(b[:l], prefix)
}

// parseLine returns true if the parsed line is a filtering rule. line is
// assumed to be trimmed of whitespace characters. nonPrintIdx is the index of
// the first non-printable character, if any; if there are none, nonPrintIdx is
// -1.
//
// A line is considered a rule if it's not empty, not a comment, and contains
// only printable characters.
func parseLine(line []byte) (nonPrintIdx int, isRule bool) {
    if len(line) == 0 || line[0] == '#' || line[0] == '!' {
        return -1, false
    }

    nonPrintIdx = bytes.IndexFunc(line, isNotPrintable)

    return nonPrintIdx, nonPrintIdx == -1
}

// isNotPrintable returns true if r is not a printable character that can be
// contained in a filtering rule.
func isNotPrintable(r rune) (ok bool) {
    // Tab isn't included into Unicode's graphic symbols, so include it here
    // explicitly.
    return r != '\t' && !unicode.IsGraphic(r)
}

// parseLineTitle is like [parseLine] but additionally looks for a title. line
// is assumed to be trimmed of whitespace characters.
func (p *Parser) parseLineTitle(line []byte) (nonPrintIdx int, isRule bool) {
    if len(line) == 0 || line[0] == '#' {
        return -1, false
    }

    if line[0] != '!' {
        nonPrintIdx = bytes.IndexFunc(line, isNotPrintable)

        return nonPrintIdx, nonPrintIdx == -1
    }

    const titlePattern = "! Title: "
    if !bytes.HasPrefix(line, []byte(titlePattern)) {
        return -1, false
    }

    title := bytes.TrimSpace(line[len(titlePattern):])
    if title != nil {
        // Note that title can be a non-nil empty slice. Consider that normal
        // and just stop looking for other titles.
        p.title = string(title)
        p.titleFound = true
    }

    return -1, false
}
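Editorial note: the tests that follow exercise the contract above in detail. A small hedged sketch of a typical call is shown here as well; it assumes it is compiled inside the AdGuardHome module (rulelist is internal), and the input values are hypothetical. Comments and the title line are consumed, only rules are copied to dst, and the checksum covers rules only.

package rulelist_test

import (
    "bytes"
    "fmt"
    "strings"

    "github.com/AdguardTeam/AdGuardHome/internal/filtering/rulelist"
)

// Example_parse sketches a single-use parse of a tiny, made-up list.
func Example_parse() {
    src := strings.NewReader(
        "! Title: My List \n" +
            "# A comment.\n" +
            "||blocked.example^\n",
    )

    dst := &bytes.Buffer{}
    buf := make([]byte, rulelist.MaxRuleLen)

    r, err := rulelist.NewParser().Parse(dst, src, buf)
    if err != nil {
        panic(err)
    }

    // The title is trimmed, the comment is dropped, and only the rule line
    // is written to dst.
    fmt.Println(r.Title, r.RulesCount, r.BytesWritten == dst.Len())

    // Output:
    // My List 1 true
}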
internal/filtering/rulelist/parser_test.go (new file) | 247

@@ -0,0 +1,247 @@
package rulelist_test

import (
    "bufio"
    "bytes"
    "strings"
    "testing"

    "github.com/AdguardTeam/AdGuardHome/internal/aghtest"
    "github.com/AdguardTeam/AdGuardHome/internal/filtering/rulelist"
    "github.com/AdguardTeam/golibs/errors"
    "github.com/AdguardTeam/golibs/testutil"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestParser_Parse(t *testing.T) {
    t.Parallel()

    testCases := []struct {
        name string
        in string
        wantDst string
        wantErrMsg string
        wantTitle string
        wantRulesNum int
        wantWritten int
    }{{
        name: "empty",
        in: "",
        wantDst: "",
        wantErrMsg: "",
        wantTitle: "",
        wantRulesNum: 0,
        wantWritten: 0,
    }, {
        name: "html",
        in: testRuleTextHTML,
        wantErrMsg: rulelist.ErrHTML.Error(),
        wantTitle: "",
        wantRulesNum: 0,
        wantWritten: 0,
    }, {
        name: "comments",
        in: "# Comment 1\n" +
            "! Comment 2\n",
        wantErrMsg: "",
        wantTitle: "",
        wantRulesNum: 0,
        wantWritten: 0,
    }, {}, {
        name: "rule",
        in: testRuleTextBlocked,
        wantDst: testRuleTextBlocked,
        wantErrMsg: "",
        wantRulesNum: 1,
        wantTitle: "",
        wantWritten: len(testRuleTextBlocked),
    }, {
        name: "html_in_rule",
        in: testRuleTextBlocked + testRuleTextHTML,
        wantDst: testRuleTextBlocked + testRuleTextHTML,
        wantErrMsg: "",
        wantTitle: "",
        wantRulesNum: 2,
        wantWritten: len(testRuleTextBlocked) + len(testRuleTextHTML),
    }, {
        name: "title",
        in: "! Title: Test Title \n" +
            "! Title: Bad, Ignored Title\n" +
            testRuleTextBlocked,
        wantDst: testRuleTextBlocked,
        wantErrMsg: "",
        wantTitle: "Test Title",
        wantRulesNum: 1,
        wantWritten: len(testRuleTextBlocked),
    }, {
        name: "bad_char",
        in: "! Title: Test Title \n" +
            testRuleTextBlocked +
            ">>>\x7F<<<",
        wantDst: testRuleTextBlocked,
        wantErrMsg: "line at index 2: " +
            "character at index 3: " +
            "non-printable character",
        wantTitle: "Test Title",
        wantRulesNum: 1,
        wantWritten: len(testRuleTextBlocked),
    }, {
        name: "too_long",
        in: strings.Repeat("a", rulelist.MaxRuleLen+1),
        wantDst: "",
        wantErrMsg: "scanning filter contents: " + bufio.ErrTooLong.Error(),
        wantTitle: "",
        wantRulesNum: 0,
        wantWritten: 0,
    }, {
        name: "bad_tab_and_comment",
        in: testRuleTextBadTab,
        wantDst: testRuleTextBadTab,
        wantErrMsg: "",
        wantTitle: "",
        wantRulesNum: 1,
        wantWritten: len(testRuleTextBadTab),
    }, {
        name: "etc_hosts_tab_and_comment",
        in: testRuleTextEtcHostsTab,
        wantDst: testRuleTextEtcHostsTab,
        wantErrMsg: "",
        wantTitle: "",
        wantRulesNum: 1,
        wantWritten: len(testRuleTextEtcHostsTab),
    }}

    for _, tc := range testCases {
        tc := tc
        t.Run(tc.name, func(t *testing.T) {
            t.Parallel()

            dst := &bytes.Buffer{}
            buf := make([]byte, rulelist.MaxRuleLen)

            p := rulelist.NewParser()
            r, err := p.Parse(dst, strings.NewReader(tc.in), buf)
            require.NotNil(t, r)

            testutil.AssertErrorMsg(t, tc.wantErrMsg, err)
            assert.Equal(t, tc.wantDst, dst.String())
            assert.Equal(t, tc.wantTitle, r.Title)
            assert.Equal(t, tc.wantRulesNum, r.RulesCount)
            assert.Equal(t, tc.wantWritten, r.BytesWritten)

            if tc.wantWritten > 0 {
                assert.NotZero(t, r.Checksum)
            }
        })
    }
}

func TestParser_Parse_writeError(t *testing.T) {
    t.Parallel()

    dst := &aghtest.Writer{
        OnWrite: func(b []byte) (n int, err error) {
            return 1, errors.Error("test error")
        },
    }
    buf := make([]byte, rulelist.MaxRuleLen)

    p := rulelist.NewParser()
    r, err := p.Parse(dst, strings.NewReader(testRuleTextBlocked), buf)
    require.NotNil(t, r)

    testutil.AssertErrorMsg(t, "writing rule line: test error", err)
    assert.Equal(t, 1, r.BytesWritten)
}

func TestParser_Parse_checksums(t *testing.T) {
    t.Parallel()

    const (
        withoutComments = testRuleTextBlocked
        withComments = "! Some comment.\n" +
            " " + testRuleTextBlocked +
            "# Another comment.\n"
    )

    buf := make([]byte, rulelist.MaxRuleLen)

    p := rulelist.NewParser()
    r, err := p.Parse(&bytes.Buffer{}, strings.NewReader(withoutComments), buf)
    require.NotNil(t, r)
    require.NoError(t, err)

    gotWithoutComments := r.Checksum

    p = rulelist.NewParser()

    r, err = p.Parse(&bytes.Buffer{}, strings.NewReader(withComments), buf)
    require.NotNil(t, r)
    require.NoError(t, err)

    gotWithComments := r.Checksum
    assert.Equal(t, gotWithoutComments, gotWithComments)
}

var (
    resSink *rulelist.ParseResult
    errSink error
)

func BenchmarkParser_Parse(b *testing.B) {
    dst := &bytes.Buffer{}
    src := strings.NewReader(strings.Repeat(testRuleTextBlocked, 1000))
    buf := make([]byte, rulelist.MaxRuleLen)
    p := rulelist.NewParser()

    b.ReportAllocs()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        resSink, errSink = p.Parse(dst, src, buf)
        dst.Reset()
    }

    require.NoError(b, errSink)
    require.NotNil(b, resSink)
}

func FuzzParser_Parse(f *testing.F) {
    const n = 64

    testCases := []string{
        "",
        "# Comment",
        "! Comment",
        "! Title ",
        "! Title XXX",
        testRuleTextEtcHostsTab,
        testRuleTextHTML,
        testRuleTextBlocked,
        testRuleTextBadTab,
        "1.2.3.4",
        "1.2.3.4 etc-hosts.example",
        ">>>\x00<<<",
        ">>>\x7F<<<",
        strings.Repeat("a", n+1),
    }

    for _, tc := range testCases {
        f.Add(tc)
    }

    buf := make([]byte, n)

    f.Fuzz(func(t *testing.T, input string) {
        require.Eventually(t, func() (ok bool) {
            dst := &bytes.Buffer{}
            src := strings.NewReader(input)

            p := rulelist.NewParser()
            r, _ := p.Parse(dst, src, buf)
            require.NotNil(t, r)

            return true
        }, testTimeout, testTimeout/100)
    })
}
internal/filtering/rulelist/rulelist.go (new file) | 11

@@ -0,0 +1,11 @@
// Package rulelist contains the implementation of the standard rule-list
// filter that wraps an urlfilter filtering-engine.
//
// TODO(a.garipov): Expand.
package rulelist

// MaxRuleLen is the maximum length of a line with a filtering rule, in bytes.
//
// TODO(a.garipov): Consider changing this to a rune length, like AdGuardDNS
// does.
const MaxRuleLen = 1024
internal/filtering/rulelist/rulelist_test.go
Normal file
14
internal/filtering/rulelist/rulelist_test.go
Normal file
|
@ -0,0 +1,14 @@
|
|||
package rulelist_test
|
||||
|
||||
import "time"
|
||||
|
||||
// testTimeout is the common timeout for tests.
|
||||
const testTimeout = 1 * time.Second
|
||||
|
||||
// Common texts for tests.
|
||||
const (
|
||||
testRuleTextHTML = "<!DOCTYPE html>\n"
|
||||
testRuleTextBlocked = "||blocked.example^\n"
|
||||
testRuleTextBadTab = "||bad-tab-and-comment.example^\t# A comment.\n"
|
||||
testRuleTextEtcHostsTab = "0.0.0.0 tab..example^\t# A comment.\n"
|
||||
)
|
|
@@ -6,13 +6,13 @@ import (
    "sync/atomic"
    "testing"

    "github.com/AdguardTeam/AdGuardHome/internal/aghtest"
    "github.com/AdguardTeam/golibs/testutil/fakenet"
    "github.com/stretchr/testify/assert"
)

func TestWaitListener_Accept(t *testing.T) {
    var accepted atomic.Bool
    var l net.Listener = &aghtest.Listener{
    var l net.Listener = &fakenet.Listener{
        OnAccept: func() (conn net.Conn, err error) {
            accepted.Store(true)
@@ -10,9 +10,10 @@ require (
    github.com/kyoh86/looppointer v0.2.1
    github.com/securego/gosec/v2 v2.16.0
    github.com/uudashr/gocognit v1.0.6
    golang.org/x/tools v0.10.0
    golang.org/x/tools v0.11.0
    golang.org/x/vuln v0.2.0
    honnef.co/go/tools v0.4.3
    // TODO(a.garipov): Return to tagged releases once a new one appears.
    honnef.co/go/tools v0.5.0-0.dev.0.20230706211743-ddee6bbaa341
    mvdan.cc/gofumpt v0.5.0
    mvdan.cc/unparam v0.0.0-20230610194454-9ea02bef9868
)

@@ -27,8 +28,8 @@ require (
    github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
    golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect
    golang.org/x/exp/typeparams v0.0.0-20230626212559-97b1e661b5df // indirect
    golang.org/x/mod v0.11.0 // indirect
    golang.org/x/mod v0.12.0 // indirect
    golang.org/x/sync v0.3.0 // indirect
    golang.org/x/sys v0.9.0 // indirect
    golang.org/x/sys v0.10.0 // indirect
    gopkg.in/yaml.v3 v3.0.1 // indirect
)
@@ -58,15 +58,15 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU=
golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.11.0 h1:Gi2tvZIJyBtO9SDr1q9h5hEQCp/4L2RQ+ar0qjx2oNU=
golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=

@@ -82,8 +82,8 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s=
golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=

@@ -96,8 +96,8 @@ golang.org/x/tools v0.0.0-20201007032633-0806396f153e/go.mod h1:z6u4i615ZeAfBE4X
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
golang.org/x/tools v0.10.0 h1:tvDr/iQoUqNdohiYm0LmmKcBk+q86lb9EprIUFhHHGg=
golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM=
golang.org/x/tools v0.11.0 h1:EMCa6U9S2LtZXLAMoWiR/R8dAQFRqbAitmbJ2UKhoi8=
golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8=
golang.org/x/vuln v0.2.0 h1:Dlz47lW0pvPHU7tnb10S8vbMn9GnV2B6eyT7Tem5XBI=
golang.org/x/vuln v0.2.0/go.mod h1:V0eyhHwaAaHrt42J9bgrN6rd12f6GU4T0Lu0ex2wDg4=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

@@ -107,8 +107,8 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.4.3 h1:o/n5/K5gXqk8Gozvs2cnL0F2S1/g1vcGCAx2vETjITw=
honnef.co/go/tools v0.4.3/go.mod h1:36ZgoUOrqOk1GxwHhyryEkq8FQWkUO2xGuSMhUCcdvA=
honnef.co/go/tools v0.5.0-0.dev.0.20230706211743-ddee6bbaa341 h1:jNlTAPEjbDiN9qda/1wple0GSpewFnWhvc1GO7bZX1U=
honnef.co/go/tools v0.5.0-0.dev.0.20230706211743-ddee6bbaa341/go.mod h1:GUV+uIBCLpdf0/v6UhHHG/yzI/z6qPskBeQCjcNB96k=
mvdan.cc/gofumpt v0.5.0 h1:0EQ+Z56k8tXjj/6TQD25BFNKQXpCvT0rnansIc7Ug5E=
mvdan.cc/gofumpt v0.5.0/go.mod h1:HBeVDtMKRZpXyxFciAirzdKklDlGu8aAy1wEbH5Y9js=
mvdan.cc/unparam v0.0.0-20230610194454-9ea02bef9868 h1:F4Q7pXcrU9UiU1fq0ZWqSOxKjNAteRuDr7JDk7uVLRQ=
@@ -18,7 +18,7 @@ Run `make init` from the project root.

## `make/`: Makefile Scripts
## `make/`: Makefile scripts

The release channels are: `development` (the default), `edge`, `beta`, and
`release`. If verbosity levels aren't documented here, there are only two: `0`,

@@ -26,7 +26,7 @@ don't print anything, and `1`, be verbose.

### `build-docker.sh`: Build A Multi-Architecture Docker Image
### `build-docker.sh`: Build a multi-architecture Docker image

Required environment:

@@ -51,7 +51,7 @@ Optional environment:

### `build-release.sh`: Build A Release For All Platforms
### `build-release.sh`: Build a release for all platforms

Required environment:

@@ -101,7 +101,22 @@ Required environment:

### `go-build.sh`: Build The Backend
### `go-bench.sh`: Run backend benchmarks

Optional environment:

* `GO`: set an alternative name for the Go compiler.

* `TIMEOUT_FLAGS`: set timeout flags for tests. The default value is
  `--timeout=30s`.

* `VERBOSE`: verbosity level. `1` shows every command that is run and every
  Go package that is processed. `2` also shows subcommands and environment.
  The default value is `0`, don't be verbose.

### `go-build.sh`: Build the backend

Optional environment:

@@ -135,7 +150,7 @@ Required environment:

### `go-deps.sh`: Install Backend Dependencies
### `go-deps.sh`: Install backend dependencies

Optional environment:

@@ -147,7 +162,25 @@ Optional environment:

### `go-lint.sh`: Run Backend Static Analyzers
### `go-fuzz.sh`: Run backend fuzz tests

Optional environment:

* `GO`: set an alternative name for the Go compiler.

* `FUZZTIME_FLAGS`: set fuss flags for tests. The default value is
  `--fuzztime=20s`.

* `TIMEOUT_FLAGS`: set timeout flags for tests. The default value is
  `--timeout=30s`.

* `VERBOSE`: verbosity level. `1` shows every command that is run and every
  Go package that is processed. `2` also shows subcommands and environment.
  The default value is `0`, don't be verbose.

### `go-lint.sh`: Run backend static analyzers

Don't forget to run `make go-tools` once first!

@@ -163,7 +196,7 @@ Optional environment:

### `go-test.sh`: Run Backend Tests
### `go-test.sh`: Run backend tests

Optional environment:

@@ -173,7 +206,7 @@ Optional environment:
  `1`, use the race detector.

* `TIMEOUT_FLAGS`: set timeout flags for tests. The default value is
  `--timeout 30s`.
  `--timeout=30s`.

* `VERBOSE`: verbosity level. `1` shows every command that is run and every
  Go package that is processed. `2` also shows subcommands. The default

@@ -181,7 +214,7 @@ Optional environment:

### `go-tools.sh`: Install Backend Tooling
### `go-tools.sh`: Install backend tooling

Installs the Go static analysis and other tools into `${PWD}/bin`. Either add
`${PWD}/bin` to your `$PATH` before all other entries, or use the commands
scripts/make/go-bench.sh (new file) | 55

@@ -0,0 +1,55 @@
#!/bin/sh

verbose="${VERBOSE:-0}"
readonly verbose

# Verbosity levels:
#   0 = Don't print anything except for errors.
#   1 = Print commands, but not nested commands.
#   2 = Print everything.
if [ "$verbose" -gt '1' ]
then
    set -x
    v_flags='-v=1'
    x_flags='-x=1'
elif [ "$verbose" -gt '0' ]
then
    set -x
    v_flags='-v=1'
    x_flags='-x=0'
else
    set +x
    v_flags='-v=0'
    x_flags='-x=0'
fi
readonly v_flags x_flags

set -e -f -u

if [ "${RACE:-1}" -eq '0' ]
then
    race_flags='--race=0'
else
    race_flags='--race=1'
fi
readonly race_flags

go="${GO:-go}"

count_flags='--count=1'
shuffle_flags='--shuffle=on'
timeout_flags="${TIMEOUT_FLAGS:---timeout=30s}"
readonly go count_flags shuffle_flags timeout_flags

"$go" test\
    "$count_flags"\
    "$shuffle_flags"\
    "$race_flags"\
    "$timeout_flags"\
    "$x_flags"\
    "$v_flags"\
    --bench='.'\
    --benchmem\
    --benchtime=1s\
    --run='^$'\
    ./...
scripts/make/go-fuzz.sh (new file) | 58

@@ -0,0 +1,58 @@
#!/bin/sh

verbose="${VERBOSE:-0}"
readonly verbose

# Verbosity levels:
#   0 = Don't print anything except for errors.
#   1 = Print commands, but not nested commands.
#   2 = Print everything.
if [ "$verbose" -gt '1' ]
then
    set -x
    v_flags='-v=1'
    x_flags='-x=1'
elif [ "$verbose" -gt '0' ]
then
    set -x
    v_flags='-v=1'
    x_flags='-x=0'
else
    set +x
    v_flags='-v=0'
    x_flags='-x=0'
fi
readonly v_flags x_flags

set -e -f -u

if [ "${RACE:-1}" -eq '0' ]
then
    race_flags='--race=0'
else
    race_flags='--race=1'
fi
readonly race_flags

go="${GO:-go}"

count_flags='--count=1'
shuffle_flags='--shuffle=on'
timeout_flags="${TIMEOUT_FLAGS:---timeout=30s}"
fuzztime_flags="${FUZZTIME_FLAGS:---fuzztime=20s}"

readonly go count_flags shuffle_flags timeout_flags fuzztime_flags

# TODO(a.garipov): File an issue about using --fuzz with multiple packages.
"$go" test\
    "$count_flags"\
    "$shuffle_flags"\
    "$race_flags"\
    "$timeout_flags"\
    "$x_flags"\
    "$v_flags"\
    "$fuzztime_flags"\
    --fuzz='.'\
    --run='^$'\
    ./internal/filtering/rulelist/\
    ;
@@ -176,6 +176,8 @@ run_linter gocognit --over 10\
    ./internal/aghchan/\
    ./internal/aghhttp/\
    ./internal/aghio/\
    ./internal/filtering/hashprefix/\
    ./internal/filtering/rulelist/\
    ./internal/next/\
    ./internal/rdns/\
    ./internal/tools/\

@@ -211,6 +213,8 @@ run_linter gosec --quiet\
    ./internal/dhcpd\
    ./internal/dhcpsvc\
    ./internal/dnsforward\
    ./internal/filtering/hashprefix/\
    ./internal/filtering/rulelist/\
    ./internal/next\
    ./internal/schedule\
    ./internal/stats\

@@ -219,8 +223,7 @@ run_linter gosec --quiet\
    ./internal/whois\
    ;

# TODO(a.garipov): Enable --blank?
run_linter errcheck --asserts ./...
run_linter errcheck ./...

staticcheck_matrix='
darwin: GOOS=darwin