Mirror of https://github.com/AdguardTeam/AdGuardHome.git
Synced 2025-04-02 15:33:36 +03:00

Commit c65700923a (parent 7030c7c24c)

    all: sync with master; upd chlog

76 changed files with 2998 additions and 1909 deletions
.github/workflows/build.yml (vendored, 2 changes)

@@ -1,7 +1,7 @@
 'name': 'build'

 'env':
-'GO_VERSION': '1.19.8'
+'GO_VERSION': '1.19.10'
 'NODE_VERSION': '14'

 'on':
.github/workflows/lint.yml (vendored, 2 changes)

@@ -1,7 +1,7 @@
 'name': 'lint'

 'env':
-'GO_VERSION': '1.19.8'
+'GO_VERSION': '1.19.10'

 'on':
 'push':
CHANGELOG.md (50 changes)

@@ -14,21 +14,57 @@ and this project adheres to
 <!--
 ## [v0.108.0] - TBA

-## [v0.107.30] - 2023-04-26 (APPROX.)
+## [v0.107.31] - 2023-06-28 (APPROX.)

-See also the [v0.107.30 GitHub milestone][ms-v0.107.30].
+See also the [v0.107.31 GitHub milestone][ms-v0.107.31].

-[ms-v0.107.30]: https://github.com/AdguardTeam/AdGuardHome/milestone/66?closed=1
+[ms-v0.107.31]: https://github.com/AdguardTeam/AdGuardHome/milestone/67?closed=1

 NOTE: Add new changes BELOW THIS COMMENT.
 -->

+### Added
+
+- The ability to edit rewrite rules via `PUT /control/rewrite/update` HTTP API
+  ([#1577]).
+
+[#1577]: https://github.com/AdguardTeam/AdGuardHome/issues/1577
+
 <!--
 NOTE: Add new changes ABOVE THIS COMMENT.
 -->

+## [v0.107.30] - 2023-06-07
+
+See also the [v0.107.30 GitHub milestone][ms-v0.107.30].
+
+### Security
+
+- Go version has been updated to prevent the possibility of exploiting the
+  CVE-2023-29402, CVE-2023-29403, and CVE-2023-29404 Go vulnerabilities fixed in
+  [Go 1.19.10][go-1.19.10].
+
+### Fixed
+
+- Unquoted IPv6 bind hosts with trailing colons erroneously considered
+  unspecified addresses are now properly validated ([#5752]).
+
+  **NOTE:** the Docker healthcheck script now also doesn't interpret the `""`
+  value as unspecified address.
+
+- Incorrect `Content-Type` header value in `POST /control/version.json` and `GET
+  /control/dhcp/interfaces` HTTP APIs ([#5716]).
+
+- Provided bootstrap servers are now used to resolve the hostnames of plain
+  UDP/TCP upstream servers.
+
+[#5716]: https://github.com/AdguardTeam/AdGuardHome/issues/5716
+
+[go-1.19.10]: https://groups.google.com/g/golang-announce/c/q5135a9d924/m/j0ZoAJOHAwAJ
+[ms-v0.107.30]: https://github.com/AdguardTeam/AdGuardHome/milestone/66?closed=1
+
+
 ## [v0.107.29] - 2023-04-18

 See also the [v0.107.29 GitHub milestone][ms-v0.107.29].

@@ -55,6 +91,7 @@ See also the [v0.107.29 GitHub milestone][ms-v0.107.29].
 [#5712]: https://github.com/AdguardTeam/AdGuardHome/issues/5712
 [#5721]: https://github.com/AdguardTeam/AdGuardHome/issues/5721
 [#5725]: https://github.com/AdguardTeam/AdGuardHome/issues/5725
+[#5752]: https://github.com/AdguardTeam/AdGuardHome/issues/5752

 [ms-v0.107.29]: https://github.com/AdguardTeam/AdGuardHome/milestone/65?closed=1


@@ -1949,11 +1986,12 @@ See also the [v0.104.2 GitHub milestone][ms-v0.104.2].


 <!--
-[Unreleased]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.30...HEAD
-[v0.107.30]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.29...v0.107.30
+[Unreleased]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.31...HEAD
+[v0.107.31]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.30...v0.107.31
 -->

-[Unreleased]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.29...HEAD
+[Unreleased]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.30...HEAD
+[v0.107.30]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.29...v0.107.30
 [v0.107.29]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.28...v0.107.29
 [v0.107.28]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.27...v0.107.28
 [v0.107.27]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.26...v0.107.27
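The rewrite-rule editing endpoint added in the changelog above can be driven by any HTTP client. Below is a minimal sketch assuming AdGuard Home listens on 127.0.0.1:3000 with basic-auth credentials; the JSON field names are an illustrative assumption, not taken from this diff, so consult the repository's OpenAPI specification for the actual request schema.

    # Hypothetical call to the new rewrite-update endpoint; the host,
    # credentials, and body fields below are assumptions for illustration.
    curl -X PUT 'http://127.0.0.1:3000/control/rewrite/update' \
        -u 'admin:password' \
        -H 'Content-Type: application/json' \
        -d '{
              "target": {"domain": "example.org", "answer": "1.2.3.4"},
              "update": {"domain": "example.org", "answer": "4.3.2.1"}
            }'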
@@ -466,6 +466,10 @@ bug or implementing the feature.
 Home](https://github.com/ebrianne/adguard-exporter) by
 [@ebrianne](https://github.com/ebrianne).

+* [Terminal-based, real-time traffic monitoring and statistics for your AdGuard Home
+  instance](https://github.com/Lissy93/AdGuardian-Term) by
+  [@Lissy93](https://github.com/Lissy93)
+
 * [AdGuard Home on GLInet
   routers](https://forum.gl-inet.com/t/adguardhome-on-gl-routers/10664) by
   [Gl-Inet](https://gl-inet.com/).
@@ -7,7 +7,7 @@
 # Make sure to sync any changes with the branch overrides below.
 'variables':
 'channel': 'edge'
-'dockerGo': 'adguard/golang-ubuntu:6.3'
+'dockerGo': 'adguard/golang-ubuntu:6.7'

 'stages':
 - 'Build frontend':

@@ -232,25 +232,24 @@
 case "$channel"
 in
 ('release')
 snapchannel='candidate'
 ;;
 ('beta')
 snapchannel='beta'
 ;;
 ('edge')
 snapchannel='edge'
 ;;
 (*)
 echo "invalid channel '$channel'"
 exit 1
 ;;
 esac

 env\
 SNAPCRAFT_CHANNEL="$snapchannel"\
 SNAPCRAFT_EMAIL="${bamboo.snapcraftEmail}"\
-SNAPCRAFT_MACAROON="${bamboo.snapcraftMacaroonPassword}"\
-SNAPCRAFT_UBUNTU_DISCHARGE="${bamboo.snapcraftUbuntuDischargePassword}"\
+SNAPCRAFT_STORE_CREDENTIALS="${bamboo.snapcraftMacaroonPassword}"\
 ../bamboo-deploy-publisher/deploy.sh adguard-home-snap
 'final-tasks':
 - 'clean'

@@ -280,9 +279,9 @@

 if [ "$channel" != 'release' ] && [ "${channel}" != 'beta' ]
 then
 echo "don't publish to GitHub Releases for this channel"

 exit 0
 fi

 cd ./dist/

@@ -331,7 +330,7 @@
 # need to build a few of these.
 'variables':
 'channel': 'beta'
-'dockerGo': 'adguard/golang-ubuntu:6.3'
+'dockerGo': 'adguard/golang-ubuntu:6.7'
 # release-vX.Y.Z branches are the branches from which the actual final release
 # is built.
 - '^release-v[0-9]+\.[0-9]+\.[0-9]+':

@@ -346,4 +345,4 @@
 # are the ones that actually get released.
 'variables':
 'channel': 'release'
-'dockerGo': 'adguard/golang-ubuntu:6.3'
+'dockerGo': 'adguard/golang-ubuntu:6.7'
@@ -5,7 +5,7 @@
 'key': 'AHBRTSPECS'
 'name': 'AdGuard Home - Build and run tests'
 'variables':
-'dockerGo': 'adguard/golang-ubuntu:6.3'
+'dockerGo': 'adguard/golang-ubuntu:6.7'

 'stages':
 - 'Tests':
@@ -150,7 +150,7 @@
 "dns_allowlists": "Белыя спісы DNS",
 "dns_blocklists_desc": "AdGuard Home будзе блакаваць дамены з чорных спісаў.",
 "dns_allowlists_desc": "Дамены з белых спісаў DNS будуць дазволены, нават калі яны знаходзяцца ў любым з чорных спісаў.",
-"custom_filtering_rules": "Карыстацкія правілы фільтрацыі",
+"custom_filtering_rules": "Карыстальніцкія правілы фільтрацыі",
 "encryption_settings": "Налады шыфравання",
 "dhcp_settings": "Налады DHCP",
 "upstream_dns": "Upstream DNS-серверы",

@@ -247,7 +247,7 @@
 "loading_table_status": "Загрузка...",
 "page_table_footer_text": "Старонка",
 "rows_table_footer_text": "радкоў",
-"updated_custom_filtering_toast": "Занесены змены ў карыстацкія правілы",
+"updated_custom_filtering_toast": "Карыстальніцкія правілы паспяхова захаваны",
 "rule_removed_from_custom_filtering_toast": "Карыстацкае правіла выдалена: {{rule}}",
 "rule_added_to_custom_filtering_toast": "Карыстацкае правіла дададзена: {{rule}}",
 "query_log_response_status": "Статус: {{value}}",

@@ -568,7 +568,7 @@
 "check_desc": "Праверыць фільтрацыю імя хаста",
 "check": "Праверыць",
 "form_enter_host": "Увядзіце імя хаста",
-"filtered_custom_rules": "Адфільтраваны з дапамогай карыстацкіх правілаў фільтрацыі",
+"filtered_custom_rules": "Адфільтраваны з дапамогай карыстальніцкіх правіл фільтрацыі",
 "choose_from_list": "Абраць са спіса",
 "add_custom_list": "Дадаць свой спіс",
 "host_whitelisted": "Хост занесены ў белы спіс",
@@ -268,6 +268,8 @@
 "blocking_mode_nxdomain": "NXDOMAIN: پاسخ با کُد NXDOMAIN",
 "blocking_mode_null_ip": "Null IP: پاسخ با آدرس آی پی صفر(0.0.0.0 برای A; :: برای AAAA)",
 "blocking_mode_custom_ip": "آی پی دستی: پاسخ با آدرس آی پی دستی تنظیم شده",
+"theme_light": "پوسته روشن",
+"theme_dark": "پوسته تیره",
 "upstream_dns_client_desc": "اگر این فیلد را خالی نگه دارید، AdGuard Home از سرور پیکربندی شده در <0> تنظیماتDNS </0> استفاده می کند.",
 "tracker_source": "منبع ردیاب",
 "source_label": "منبع",

@@ -567,5 +569,6 @@
 "use_saved_key": "از کلید ذخیره شده قبلی استفاده کنید",
 "parental_control": "نظارت والدین",
 "safe_browsing": "وب گردی اَمن",
-"form_error_password_length": "رمزعبور باید حداقل {{value}} کاراکتر باشد."
+"form_error_password_length": "رمزعبور باید حداقل {{value}} کاراکتر باشد.",
+"protection_section_label": "حفاظت"
 }
@@ -86,7 +86,7 @@
 "request_details": "Pyynnön tiedot",
 "client_details": "Päätelaitteen tiedot",
 "details": "Yksityiskohdat",
-"back": "Takaisin",
+"back": "Palaa takaisin",
 "dashboard": "Tila",
 "settings": "Asetukset",
 "filters": "Suodattimet",

@@ -146,8 +146,8 @@
 "no_servers_specified": "Palvelimia ei ole määritetty",
 "general_settings": "Yleiset asetukset",
 "dns_settings": "DNS-asetukset",
-"dns_blocklists": "DNS-estolistat",
-"dns_allowlists": "DNS-sallittujen listat",
+"dns_blocklists": "DNS-estot",
+"dns_allowlists": "DNS-sallinnat",
 "dns_blocklists_desc": "AdGuard Home estää estolistalla olevat verkkotunnukset.",
 "dns_allowlists_desc": "DNS-sallittujen listalla olevat verkkotunnukset sallitaan myös silloin, jos ne ovat jollain muulla estolistalla.",
 "custom_filtering_rules": "Omat suodatussäännöt",

@@ -627,7 +627,7 @@
 "cache_optimistic": "Optimistinen välimuisti",
 "cache_optimistic_desc": "Pakota AdGuard Home vastaamaan välimuistista vaikka tiedot olisivat vanhentuneet. Pyri samalla myös päivittämään tiedot.",
 "filter_category_general": "Yleiset",
-"filter_category_security": "Turvallisuus",
+"filter_category_security": "Tietoturva",
 "filter_category_regional": "Alueelliset",
 "filter_category_other": "Muut",
 "filter_category_general_desc": "Listat, jotka estävät seurannan ja mainokset useimmilla laitteilla",
@@ -282,6 +282,8 @@
 "blocking_mode_null_ip": "Null IP: Svar med en 0-IP-adresse (0.0.0.0 for A; :: for AAAA)",
 "blocking_mode_custom_ip": "Tilpasset IP: Svar med en manuelt valgt IP-adresse",
 "theme_auto": "Auto",
+"theme_light": "Lyst tema",
+"theme_dark": "Mørkt tema",
 "upstream_dns_client_desc": "Hvis dette feltet holdes tomt, vil AdGuard Home bruke tjenerne som er satt opp i <0>DNS-innstillingene</0>.",
 "tracker_source": "Sporerkilde",
 "source_label": "Kilde",
@@ -222,7 +222,7 @@
 "all_lists_up_to_date_toast": "Wszystkie listy są już aktualne",
 "updated_upstream_dns_toast": "Serwery nadrzędne zostały pomyślnie zapisane",
 "dns_test_ok_toast": "Określone serwery DNS działają poprawnie",
-"dns_test_not_ok_toast": "Serwer \"{{key}}\": nie można go użyć, sprawdź, czy napisałeś go poprawnie",
+"dns_test_not_ok_toast": "Serwer \"{{key}}\": nie może być użyte, sprawdź, czy zapisano go poprawnie",
 "dns_test_warning_toast": "Upstream \"{{key}}\" nie odpowiada na zapytania testowe i może nie działać prawidłowo",
 "unblock": "Odblokuj",
 "block": "Zablokuj",

@@ -346,7 +346,7 @@
 "install_devices_windows_list_2": "Przejdź do kategorii Sieć i Internet, a następnie do Centrum sieci i udostępniania.",
 "install_devices_windows_list_3": "W lewym panelu kliknij \"Zmień ustawienia adaptera\".",
 "install_devices_windows_list_4": "Kliknij prawym przyciskiem myszy aktywne połączenie i wybierz Właściwości.",
-"install_devices_windows_list_5": "Znajdź na liście \"Protokół internetowy w wersji 4 (TCP/IPv4)\" (lub w przypadku IPv6 \"Protokół internetowy w wersji 6 (TCP/IPv6)\"), zaznacz go i ponownie kliknij na Właściwości.",
+"install_devices_windows_list_5": "Znajdź na liście \"Protokół internetowy w wersji 4 (TCP/IPv4)\" (lub w przypadku IPv6 \"Protokół internetowy w wersji 6 (TCP/IPv6)\"), zaznacz go i ponownie kliknij Właściwości.",
 "install_devices_windows_list_6": "Wybierz opcję \"Użyj następujących adresów serwerów DNS\" i wprowadź adresy serwerów AdGuard Home.",
 "install_devices_macos_list_1": "Kliknij ikonę Apple i przejdź do Preferencje systemowe.",
 "install_devices_macos_list_2": "Kliknij Sieć.",

@@ -396,7 +396,7 @@
 "encryption_issuer": "Zgłaszający",
 "encryption_hostnames": "Nazwy hostów",
 "encryption_reset": "Czy na pewno chcesz zresetować ustawienia szyfrowania?",
-"encryption_warning": "Uwaga!",
+"encryption_warning": "Uwaga",
 "topline_expiring_certificate": "Twój certyfikat SSL wkrótce wygaśnie. Zaktualizuj <0>Ustawienia szyfrowania</0>.",
 "topline_expired_certificate": "Twój certyfikat SSL wygasł. Zaktualizuj <0>Ustawienia szyfrowania</0>.",
 "form_error_port_range": "Wpisz numer portu z zakresu 80-65535",

@@ -542,7 +542,7 @@
 "password_placeholder": "Wpisz hasło",
 "sign_in": "Zaloguj się",
 "sign_out": "Wyloguj się",
-"forgot_password": "Zapomniałeś hasła?",
+"forgot_password": "Zapomniano hasła?",
 "forgot_password_desc": "Wykonaj <0>te kroki</0>, aby utworzyć nowe hasło do konta użytkownika.",
 "location": "Lokalizacja",
 "orgname": "Nazwa firmy",
@@ -529,7 +529,7 @@
 "ignore_domains": "Domínios ignorados (separados por nova linha)",
 "ignore_domains_title": "Domínios ignorados",
 "ignore_domains_desc_stats": "As consultas para esses domínios não são gravadas nas estatísticas",
-"ignore_domains_desc_query": "As consultas para esses domínios não são gravadas no log de consulta",
+"ignore_domains_desc_query": "As consultas para esses domínios não são gravadas no registro de consulta",
 "interval_hours": "{{count}} hora",
 "interval_hours_plural": "{{count}} horas",
 "filters_configuration": "Configuração de filtros",
@@ -529,7 +529,7 @@
 "ignore_domains": "Domínios ignorados (separados por nova linha)",
 "ignore_domains_title": "Domínios ignorados",
 "ignore_domains_desc_stats": "As consultas para estes domínios não aparecem nas estatísticas",
-"ignore_domains_desc_query": "As consultas para estes domínios nãoaparecem no registo de consultas",
+"ignore_domains_desc_query": "As consultas para estes domínios não aparecem no registo de consultas",
 "interval_hours": "{{count}} hora",
 "interval_hours_plural": "{{count}} horas",
 "filters_configuration": "Definição dos filtros",
@@ -167,6 +167,7 @@
 "enabled_parental_toast": "«Батьківський контроль» увімкнено",
 "disabled_safe_search_toast": "Безпечний пошук вимкнено",
 "enabled_save_search_toast": "Безпечний пошук увімкнено",
+"updated_save_search_toast": "Налаштування Безпечного пошуку оновлено",
 "enabled_table_header": "Увімкнено",
 "name_table_header": "Назва",
 "list_url_table_header": "URL списку",

@@ -290,6 +291,8 @@
 "rate_limit": "Обмеження швидкості",
 "edns_enable": "Увімкнути відправку EDNS Client Subnet",
 "edns_cs_desc": "Додавати параметр EDNS Client Subnet (ECS) до запитів до upstream-серверів, а також записувати в журнал значення, що надсилаються клієнтами.",
+"edns_use_custom_ip": "Використання користувацької IP-адреси для EDNS",
+"edns_use_custom_ip_desc": "Дозволити використовувати користувацьку IP-адресу для EDNS",
 "rate_limit_desc": "Кількість запитів в секунду, які може робити один клієнт. Встановлене значення «0» означатиме необмежену кількість.",
 "blocking_ipv4_desc": "IP-адреса, яку потрібно видати для заблокованого A запиту",
 "blocking_ipv6_desc": "IP-адреса, яку потрібно видати для заблокованого АААА запиту",

@@ -523,6 +526,10 @@
 "statistics_retention_confirm": "Ви впевнені, що хочете змінити тривалість статистики? Якщо зменшити значення інтервалу, деякі дані будуть втрачені",
 "statistics_cleared": "Статистику успішно очищено",
 "statistics_enable": "Увімкнути статистику",
+"ignore_domains": "Ігноровані домени (по одному на рядок)",
+"ignore_domains_title": "Ігноровані домени",
+"ignore_domains_desc_stats": "Запити для цих доменів в статистику не пишуться",
+"ignore_domains_desc_query": "Запити для цих доменів не записуються до журналу запитів",
 "interval_hours": "{{count}} година",
 "interval_hours_plural": "{{count}} годин(и)",
 "filters_configuration": "Конфігурація фільтрів",

@@ -643,5 +650,29 @@
 "confirm_dns_cache_clear": "Ви впевнені, що бажаєте очистити кеш DNS?",
 "cache_cleared": "Кеш DNS успішно очищено",
 "clear_cache": "Очистити кеш",
-"protection_section_label": "Захист"
+"make_static": "Зробити статичним",
+"theme_auto_desc": "Автоматична (на основі теми вашого пристрою)",
+"theme_dark_desc": "Темна тема",
+"theme_light_desc": "Світла тема",
+"disable_for_seconds": "На {{count}} секунду",
+"disable_for_seconds_plural": "На {{count}} секунд",
+"disable_for_minutes": "На {{count}} хвилину",
+"disable_for_minutes_plural": "На {{count}} хвилин",
+"disable_for_hours": "На {{count}} годину",
+"disable_for_hours_plural": "На {{count}} годин",
+"disable_until_tomorrow": "До завтра",
+"disable_notify_for_seconds": "Вимкнення захисту на {{count}} секунду",
+"disable_notify_for_seconds_plural": "Вимкнення захисту на {{count}} секунд",
+"disable_notify_for_minutes": "Вимкнення захисту на {{count}} хвилину",
+"disable_notify_for_minutes_plural": "Вимкнення захисту на {{count}} хвилин",
+"disable_notify_for_hours": "Вимкнення захисту на {{count}} годину",
+"disable_notify_for_hours_plural": "Вимкнення захисту на {{count}} годин",
+"disable_notify_until_tomorrow": "Відключення захисту до завтра",
+"enable_protection_timer": "Захист буде ввімкнено о {{time}}",
+"custom_retention_input": "Введіть час в годинах",
+"custom_rotation_input": "Введіть час в годинах",
+"protection_section_label": "Захист",
+"log_and_stats_section_label": "Журнал запитів і статистика",
+"ignore_query_log": "Ігнорувати цей клієнт у журналі запитів",
+"ignore_statistics": "Ігноруйте цей клієнт в статистиці"
 }
@@ -100,6 +100,12 @@ export default {
 "homepage": "https://github.com/DandelionSprout/adfilt",
 "source": "https://adguardteam.github.io/HostlistsRegistry/assets/filter_13.txt"
 },
+"POL_cert_polska_list_of_malicious_domains": {
+"name": "POL: CERT Polska List of malicious domains",
+"categoryId": "regional",
+"homepage": "https://cert.pl/posts/2020/03/ostrzezenia_phishing/",
+"source": "https://adguardteam.github.io/HostlistsRegistry/assets/filter_41.txt"
+},
 "POL_polish_filters_for_pi_hole": {
 "name": "POL: Polish filters for Pi-hole",
 "categoryId": "regional",

@@ -118,6 +124,12 @@ export default {
 "homepage": "https://github.com/bkrucarci/turk-adlist",
 "source": "https://adguardteam.github.io/HostlistsRegistry/assets/filter_26.txt"
 },
+"TUR_turkish_ad_hosts": {
+"name": "TUR: Turkish Ad Hosts",
+"categoryId": "regional",
+"homepage": "https://github.com/symbuzzer/Turkish-Ad-Hosts",
+"source": "https://adguardteam.github.io/HostlistsRegistry/assets/filter_40.txt"
+},
 "VNM_abpvn": {
 "name": "VNM: ABPVN List",
 "categoryId": "regional",

@@ -214,6 +226,12 @@ export default {
 "homepage": "https://github.com/durablenapkin/scamblocklist",
 "source": "https://adguardteam.github.io/HostlistsRegistry/assets/filter_10.txt"
 },
+"shadowwhisperers_malware_list": {
+"name": "ShadowWhisperer's Malware List",
+"categoryId": "security",
+"homepage": "https://github.com/ShadowWhisperer/BlockLists",
+"source": "https://adguardteam.github.io/HostlistsRegistry/assets/filter_42.txt"
+},
 "staklerware_indicators_list": {
 "name": "Stalkerware Indicators List",
 "categoryId": "security",
@@ -1,5 +1,5 @@
 {
-"timeUpdated": "2023-04-06T10:46:09.881Z",
+"timeUpdated": "2023-06-01T00:12:12.660Z",
 "categories": {
 "0": "audio_video_player",
 "1": "comments",

@@ -19526,6 +19526,13 @@
 "companyId": "qualcomm",
 "source": "AdGuard"
 },
+"qualcomm_location_service": {
+"name": "Qualcomm Location Service",
+"categoryId": 15,
+"url": "https://www.qualcomm.com/site/privacy/services",
+"companyId": "qualcomm",
+"source": "AdGuard"
+},
 "recaptcha": {
 "name": "reCAPTCHA",
 "categoryId": 8,

@@ -19533,6 +19540,55 @@
 "companyId": "google",
 "source": "AdGuard"
 },
+"samsung": {
+"name": "Samsung",
+"categoryId": 8,
+"url": "https://www.samsung.com/",
+"companyId": "samsung",
+"source": "AdGuard"
+},
+"samsungads": {
+"name": "Samsung Ads",
+"categoryId": 4,
+"url": "https://www.samsung.com/business/samsungads/",
+"companyId": "samsung",
+"source": "AdGuard"
+},
+"samsungapps": {
+"name": "Samsung Apps",
+"categoryId": 101,
+"url": "https://www.samsung.com/au/apps/",
+"companyId": "samsung",
+"source": "AdGuard"
+},
+"samsungmobile": {
+"name": "Samsung Mobile",
+"categoryId": 101,
+"url": "https://www.samsung.com/mobile/",
+"companyId": "samsung",
+"source": "AdGuard"
+},
+"samsungpush": {
+"name": "Samsung Push",
+"categoryId": 8,
+"url": null,
+"companyId": "samsung",
+"source": "AdGuard"
+},
+"samsungsds": {
+"name": "Samsung SDS",
+"categoryId": 10,
+"url": "https://www.samsungsds.com/",
+"companyId": "samsung",
+"source": "AdGuard"
+},
+"samsungtv": {
+"name": "Samsung TV",
+"categoryId": 15,
+"url": "https://www.samsung.com/au/tvs/",
+"companyId": "samsung",
+"source": "AdGuard"
+},
 "sectigo": {
 "name": "Sectigo Limited",
 "categoryId": 5,

@@ -19589,6 +19645,13 @@
 "companyId": "telstra",
 "source": "AdGuard"
 },
+"ubuntu": {
+"name": "Ubuntu",
+"categoryId": 8,
+"url": "https://ubuntu.com/",
+"companyId": "ubuntu",
+"source": "AdGuard"
+},
 "unity_ads": {
 "name": "Unity Ads",
 "categoryId": 4,

@@ -19651,6 +19714,13 @@
 "url": "https://www.3gpp.org/",
 "companyId": "3gpp",
 "source": "AdGuard"
+},
+"7plus": {
+"name": "7plus",
+"categoryId": 0,
+"url": "https://7plus.com.au/",
+"companyId": "7plus",
+"source": "AdGuard"
 }
 },
 "trackerDomains": {

@@ -19843,8 +19913,8 @@
 "adfreestyle.pl": "adfreestyle",
 "adfront.org": "adfront",
 "adfrontiers.com": "adfrontiers",
-"adgear.com": "adgear",
-"adgrx.com": "adgear",
+"adgear.com": "samsungads",
+"adgrx.com": "samsungads",
 "adgebra.co.in": "adgebra",
 "adgenie.co.uk": "adgenie",
 "ad.adgile.com": "adgile",

@@ -24056,6 +24126,10 @@
 "safebrowsing.g.applimg.com": "apple",
 "applvn.com": "applovin",
 "applovin.com": "applovin",
+"bitbucket.org": "atlassian.net",
+"jira.com": "atlassian.net",
+"ss-inf.net": "atlassian.net",
+"stspg-customer.com": "statuspage.io",
 "blob.core.windows.net": "azure_blob_storage",
 "azure.com": "azure",
 "trafficmanager.net": "azure",

@@ -24063,6 +24137,21 @@
 "mobileapptracking.com": "branch",
 "bttn.io": "button",
 "cloudflare-dns.com": "cloudflare",
+"cloudflare-dm-cmpimg.com": "cloudflare",
+"cloudflare-ipfs.com": "cloudflare",
+"cloudflare-quic.com": "cloudflare",
+"cloudflare-terms-of-service-abuse.com": "cloudflare",
+"cloudflare.tv": "cloudflare",
+"cloudflareaccess.com": "cloudflare",
+"cloudflareclient.com": "cloudflare",
+"cloudflareinsights.com": "cloudflare",
+"cloudflareok.com": "cloudflare",
+"cloudflareportal.com": "cloudflare",
+"cloudflareresolve.com": "cloudflare",
+"cloudflaressl.com": "cloudflare",
+"cloudflarestatus.com": "cloudflare",
+"pacloudflare.com": "cloudflare",
+"sn-cloudflare.com": "cloudflare",
 "crashlytics.com": "crashlytics",
 "phicdn.net": "digicert_trust_seal",
 "domain.glass": "domainglass",

@@ -24092,6 +24181,9 @@
 "qy.net": "iqiyi",
 "iqiyi.com": "iqiyi",
 "iq.com": "iqiyi",
+"ironsrc.com": "ironsource",
+"ironsrc.net": "ironsource",
+"supersonicads.com": "ironsource",
 "karambasecurity.com": "karambasecurity",
 "kik.com": "kik",
 "apikik.com": "kik",

@@ -24121,6 +24213,23 @@
 "mozilla.com": "mozilla",
 "mozilla.net": "mozilla",
 "mozilla.org": "mozilla",
+"flxvpn.net": "netflix",
+"netflix.ca": "netflix",
+"netflix.com.au": "netflix",
+"netflix.net": "netflix",
+"netflixdnstest1.com": "netflix",
+"netflixdnstest10.com": "netflix",
+"netflixdnstest2.com": "netflix",
+"netflixdnstest3.com": "netflix",
+"netflixdnstest4.com": "netflix",
+"netflixdnstest5.com": "netflix",
+"netflixdnstest6.com": "netflix",
+"netflixdnstest7.com": "netflix",
+"netflixdnstest8.com": "netflix",
+"netflixdnstest9.com": "netflix",
+"netflixinvestor.com": "netflix",
+"netflixstudios.com": "netflix",
+"netflixtechblog.com": "netflix",
 "nflximg.com": "netflix",
 "netify.ai": "netify",
 "nab.com": "nab",

@@ -24144,9 +24253,69 @@
 "oztam.com.au": "oztam",
 "plex.tv": "plex",
 "plex.direct": "plex",
-"xtracloud.net": "qualcomm",
 "qualcomm.com": "qualcomm",
+"gpsonextra.net": "qualcomm_location_service",
+"izatcloud.net": "qualcomm_location_service",
+"xtracloud.net": "qualcomm_location_service",
 "recaptcha.net": "recaptcha",
+"samsungacr.com": "samsungads",
+"samsungadhub.com": "samsungads",
+"samsungads.com": "samsungads",
+"samsungtifa.com": "samsungads",
+"game-mode.net": "samsung",
+"gos-gsp.io": "samsung",
+"lldns.net": "samsung",
+"pavv.co.kr": "samsung",
+"remotesamsung.com": "samsung",
+"samsung-gamelauncher.com": "samsung",
+"samsung.co.kr": "samsung",
+"samsung.com": "samsung",
+"samsung.com.cn": "samsung",
+"samsungcloud.com": "samsung",
+"samsungcloudcdn.com": "samsung",
+"samsungcloudprint.com": "samsung",
+"samsungcloudsolution.com": "samsung",
+"samsungcloudsolution.net": "samsung",
+"samsungelectronics.com": "samsung",
+"samsunghealth.com": "samsung",
+"samsungiotcloud.com": "samsung",
+"samsungknox.com": "samsung",
+"samsungnyc.com": "samsung",
+"samsungosp.com": "samsung",
+"samsungotn.net": "samsung",
+"samsungpositioning.com": "samsung",
+"samsungqbe.com": "samsung",
+"samsungrm.net": "samsung",
+"samsungrs.com": "samsung",
+"samsungsemi.com": "samsung",
+"samsungsetup.com": "samsung",
+"samsungusa.com": "samsung",
+"secb2b.com": "samsung",
+"smartthings.com": "samsung",
+"ospserver.net": "samsungmobile",
+"samsungdms.net": "samsungmobile",
+"samsungmax.com": "samsungmobile",
+"samsungmobile.com": "samsungmobile",
+"secmobilesvc.com": "samsungmobile",
+"internetat.tv": "samsungtv",
+"samsungcloud.tv": "samsungtv",
+"samsungsds.com": "samsungsds",
+"push.samsungosp.com": "samsungpush",
+"pushmessage.samsung.com": "samsungpush",
+"scs.samsungqbe.com": "samsungpush",
+"ssp.samsung.com": "samsungpush",
+"aibixby.com": "samsungapps",
+"findmymobile.samsung.com": "samsungapps",
+"samsapps.cust.lldns.net": "samsungapps",
+"samsung-omc.com": "samsungapps",
+"samsungapps.com": "samsungapps",
+"samsungdiroute.net": "samsungapps",
+"samsungdive.com": "samsungapps",
+"samsungdm.com": "samsungapps",
+"samsungdmroute.com": "samsungapps",
+"samsungmdec.com": "samsungapps",
+"samsungvisioncloud.com": "samsungapps",
+"sbixby.com": "samsungapps",
 "sectigo.com": "sectigo",
 "showrss.info": "showrss",
 "similarweb.io": "similarweb",

@@ -24171,6 +24340,13 @@
 "telstra.com.au": "telstra",
 "telstra.com": "telstra",
 "usertrust.com": "trustlogo",
+"canonical.com": "ubuntu",
+"launchpad.net": "ubuntu",
+"launchpadcontent.net": "ubuntu",
+"snapcraft.io": "ubuntu",
+"snapcraftcontent.com": "ubuntu",
+"ubuntu.com": "ubuntu",
+"ubuntucompanyservices.co.za": "ubuntu",
 "unityads.unity3d.com": "unity_ads",
 "exp-tas.com": "vscode",
 "vscode-unpkg.net": "vscode",

@@ -24190,6 +24366,7 @@
 "yandex.kz": "yandex",
 "appmetrica.yandex.com": "yandex_appmetrica",
 "3gppnetwork.org": "3gpp",
-"3gpp.org": "3gpp"
+"3gpp.org": "3gpp",
+"swm.digital": "7plus"
 }
 }
@@ -7,11 +7,10 @@
 addrs[$2] = true
 prev_line = FNR

-if ($2 == "0.0.0.0" || $2 == "::") {
-delete addrs
-addrs["localhost"] = true
-
+if ($2 == "0.0.0.0" || $2 == "'::'") {
 # Drop all the other addresses.
+delete addrs
+addrs[""] = true
 prev_line = -1
 }
 }
@@ -61,8 +61,11 @@ then
 error_exit "no DNS bindings could be retrieved from $filename"
 fi

+first_dns="$( echo "$dns_hosts" | head -n 1 )"
+readonly first_dns
+
 # TODO(e.burkov): Deal with 0 port.
-case "$( echo "$dns_hosts" | head -n 1 )"
+case "$first_dns"
 in
 (*':0')
 error_exit '0 in DNS port is not supported by healthcheck'

@@ -82,8 +85,23 @@ esac
 # See https://github.com/AdguardTeam/AdGuardHome/issues/5642.
 wget --no-check-certificate "$web_url" -O /dev/null -q || exit 1

-echo "$dns_hosts" | while read -r host
-do
-nslookup -type=a healthcheck.adguardhome.test. "$host" > /dev/null ||\
-error_exit "nslookup failed for $host"
-done
+test_fqdn="healthcheck.adguardhome.test."
+readonly test_fqdn
+
+# The awk script currently returns only port prefixed with colon in case of
+# unspecified address.
+case "$first_dns"
+in
+(':'*)
+nslookup -type=a "$test_fqdn" "127.0.0.1${first_dns}" > /dev/null ||\
+nslookup -type=a "$test_fqdn" "[::1]${first_dns}" > /dev/null ||\
+error_exit "nslookup failed for $host"
+;;
+(*)
+echo "$dns_hosts" | while read -r host
+do
+nslookup -type=a "$test_fqdn" "$host" > /dev/null ||\
+error_exit "nslookup failed for $host"
+done
+;;
+esac
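For reference, the unspecified-address branch added above amounts to the following hand-run check. This is a sketch, not part of the actual script, assuming the awk helper reported only a colon-prefixed port (here ':53') because the server is bound to an unspecified address.

    # Hypothetical manual equivalent of the (':'*) branch shown in the diff.
    # ':53' stands in for an unspecified bind address reported as a bare port.
    first_dns=':53'
    test_fqdn='healthcheck.adguardhome.test.'
    nslookup -type=a "$test_fqdn" "127.0.0.1${first_dns}" > /dev/null \
        || nslookup -type=a "$test_fqdn" "[::1]${first_dns}" > /dev/null \
        || echo 'healthcheck would fail here'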
go.mod (30 changes)

@@ -3,7 +3,7 @@ module github.com/AdguardTeam/AdGuardHome
 go 1.19

 require (
-github.com/AdguardTeam/dnsproxy v0.48.3
+github.com/AdguardTeam/dnsproxy v0.50.0
 github.com/AdguardTeam/golibs v0.13.2
 github.com/AdguardTeam/urlfilter v0.16.1
 github.com/NYTimes/gziphandler v1.1.1

@@ -16,24 +16,24 @@ require (
 github.com/google/gopacket v1.1.19
 github.com/google/renameio v1.0.1
 github.com/google/uuid v1.3.0
-github.com/insomniacslk/dhcp v0.0.0-20221215072855-de60144f33f8
+github.com/insomniacslk/dhcp v0.0.0-20230516061539-49801966e6cb
 github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86
 github.com/kardianos/service v1.2.2
 github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118
-github.com/mdlayher/netlink v1.7.1
-github.com/mdlayher/packet v1.1.1
+github.com/mdlayher/netlink v1.7.2
+github.com/mdlayher/packet v1.1.2
 // TODO(a.garipov): This package is deprecated; find a new one or use our
 // own code for that. Perhaps, use gopacket.
 github.com/mdlayher/raw v0.1.0
-github.com/miekg/dns v1.1.53
-github.com/quic-go/quic-go v0.33.0
+github.com/miekg/dns v1.1.54
+github.com/quic-go/quic-go v0.35.1
 github.com/stretchr/testify v1.8.2
 github.com/ti-mo/netfilter v0.5.0
 go.etcd.io/bbolt v1.3.7
-golang.org/x/crypto v0.8.0
-golang.org/x/exp v0.0.0-20230321023759-10a507213a29
-golang.org/x/net v0.9.0
-golang.org/x/sys v0.7.0
+golang.org/x/crypto v0.9.0
+golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1
+golang.org/x/net v0.10.0
+golang.org/x/sys v0.8.0
 gopkg.in/natefinch/lumberjack.v2 v2.2.1
 gopkg.in/yaml.v3 v3.0.1
 howett.net/plist v1.0.0

@@ -48,9 +48,9 @@ require (
 github.com/davecgh/go-spew v1.1.1 // indirect
 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
 github.com/golang/mock v1.6.0 // indirect
-github.com/google/pprof v0.0.0-20230406165453-00490a63f317 // indirect
-github.com/mdlayher/socket v0.4.0 // indirect
-github.com/onsi/ginkgo/v2 v2.9.2 // indirect
+github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 // indirect
+github.com/mdlayher/socket v0.4.1 // indirect
+github.com/onsi/ginkgo/v2 v2.10.0 // indirect
 github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
 github.com/pierrec/lz4/v4 v4.1.17 // indirect
 github.com/pkg/errors v0.9.1 // indirect

@@ -60,7 +60,7 @@ require (
 github.com/quic-go/qtls-go1-20 v0.2.2 // indirect
 github.com/u-root/uio v0.0.0-20230305220412-3e8cd9d6bf63 // indirect
 golang.org/x/mod v0.10.0 // indirect
-golang.org/x/sync v0.1.0 // indirect
+golang.org/x/sync v0.2.0 // indirect
 golang.org/x/text v0.9.0 // indirect
-golang.org/x/tools v0.8.0 // indirect
+golang.org/x/tools v0.9.3 // indirect
 )
go.sum (103 changes)

@@ -1,5 +1,5 @@
-github.com/AdguardTeam/dnsproxy v0.48.3 h1:h9xgDSmd1MqsPFNApyaPVXolmSTtzOWOcfWvPeDEP6s=
-github.com/AdguardTeam/dnsproxy v0.48.3/go.mod h1:Y7g7jRTd/u7+KJ/QvnGI2PCE8vnisp6EsW47/Sz0DZw=
+github.com/AdguardTeam/dnsproxy v0.50.0 h1:gqImxUMBVS8VQmGdXw0U7MjJNVzXkYaZ9NM5TKl3JBU=
+github.com/AdguardTeam/dnsproxy v0.50.0/go.mod h1:CQhZTkqC8X0ID6glrtyaxgqRRdiYfn1gJulC1cZ5Dn8=
 github.com/AdguardTeam/golibs v0.4.0/go.mod h1:skKsDKIBB7kkFflLJBpfGX+G8QFTx0WKUzB6TIgtUj4=
 github.com/AdguardTeam/golibs v0.10.4/go.mod h1:rSfQRGHIdgfxriDDNgNJ7HmE5zRoURq8R+VdR81Zuzw=
 github.com/AdguardTeam/golibs v0.13.2 h1:BPASsyQKmb+b8VnvsNOHp7bKfcZl9Z+Z2UhPjOiupSc=

@@ -31,10 +31,9 @@ github.com/digineo/go-ipset/v2 v2.2.1 h1:k6skY+0fMqeUjjeWO/m5OuWPSZUAn7AucHMnQ1M
 github.com/digineo/go-ipset/v2 v2.2.1/go.mod h1:wBsNzJlZlABHUITkesrggFnZQtgW5wkqw1uo8Qxe0VU=
 github.com/dimfeld/httptreemux/v5 v5.5.0 h1:p8jkiMrCuZ0CmhwYLcbNbl7DDo21fozhKHQ2PccwOFQ=
 github.com/dimfeld/httptreemux/v5 v5.5.0/go.mod h1:QeEylH57C0v3VO0tkKraVz9oD3Uu93CKPnTLbsidvSw=
-github.com/fanliao/go-promise v0.0.0-20141029170127-1890db352a72/go.mod h1:PjfxuH4FZdUyfMdtBio2lsRr1AKEaVPwelzuHuh8Lqc=
 github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
 github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
+github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
 github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY=
 github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
 github.com/go-ping/ping v1.1.0 h1:3MCGhVX4fyEUuhsfwPrsEdQw6xspHkv5zHsiSoDFZYw=

@@ -45,72 +44,54 @@ github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
 github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
 github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
 github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
 github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
-github.com/google/pprof v0.0.0-20230406165453-00490a63f317 h1:hFhpt7CTmR3DX+b4R19ydQFtofxT0Sv3QsKNMVQYTMQ=
-github.com/google/pprof v0.0.0-20230406165453-00490a63f317/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk=
+github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 h1:hR7/MlvK23p6+lIw9SN1TigNLn9ZnF3W4SYRKq2gAHs=
+github.com/google/pprof v0.0.0-20230602150820-91b7bce49751/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA=
 github.com/google/renameio v1.0.1 h1:Lh/jXZmvZxb0BBeSY5VKEfidcbcbenKjZFzM/q0fSeU=
 github.com/google/renameio v1.0.1/go.mod h1:t/HQoYBZSsWSNK35C6CO/TpPLDVWvxOHboWUAweKUpk=
 github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
 github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/hugelgupf/socketpair v0.0.0-20190730060125-05d35a94e714 h1:/jC7qQFrv8CrSJVmaolDVOxTfS9kc36uB6H40kdbQq8=
-github.com/hugelgupf/socketpair v0.0.0-20190730060125-05d35a94e714/go.mod h1:2Goc3h8EklBH5mspfHFxBnEoURQCGzQQH1ga9Myjvis=
-github.com/insomniacslk/dhcp v0.0.0-20221215072855-de60144f33f8 h1:Z72DOke2yOK0Ms4Z2LK1E1OrRJXOxSj5DllTz2FYTRg=
-github.com/insomniacslk/dhcp v0.0.0-20221215072855-de60144f33f8/go.mod h1:m5WMe03WCvWcXjRnhvaAbAAXdCnu20J5P+mmH44ZzpE=
+github.com/insomniacslk/dhcp v0.0.0-20230516061539-49801966e6cb h1:6fDKEAXwe3rsfS4khW3EZ8kEqmSiV9szhMPcDrD+Y7Q=
+github.com/insomniacslk/dhcp v0.0.0-20230516061539-49801966e6cb/go.mod h1:7474bZ1YNCvarT6WFKie4kEET6J0KYRDC4XJqqXzQW4=
 github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
 github.com/josharian/native v1.0.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
 github.com/josharian/native v1.0.1-0.20221213033349-c1e37c09b531/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
 github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 h1:elKwZS1OcdQ0WwEDBeqxKwb7WB62QX8bvZ/FJnVXIfk=
 github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86/go.mod h1:aFAMtuldEgx/4q7iSGazk22+IcgvtiC+HIimFO9XlS8=
-github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw=
-github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ=
-github.com/jsimonetti/rtnetlink v0.0.0-20201009170750-9c6f07d100c1/go.mod h1:hqoO/u39cqLeBLebZ8fWdE96O7FxrAsRYhnVOdgHxok=
-github.com/jsimonetti/rtnetlink v0.0.0-20201110080708-d2c240429e6c/go.mod h1:huN4d1phzjhlOsNIjFsw2SVRbwIHj3fJDMEU2SDPTmg=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
 github.com/kardianos/service v1.2.2 h1:ZvePhAHfvo0A7Mftk/tEzqEZ7Q4lgnR8sGz4xu1YX60=
 github.com/kardianos/service v1.2.2/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM=
|
github.com/kardianos/service v1.2.2/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM=
|
||||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||||
github.com/mdlayher/ethernet v0.0.0-20190606142754-0394541c37b7/go.mod h1:U6ZQobyTjI/tJyq2HG+i/dfSoFUt8/aZCM+GKtmFk/Y=
|
|
||||||
github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118 h1:2oDp6OOhLxQ9JBoUuysVz9UZ9uI6oLUbvAZu0x8o+vE=
|
github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118 h1:2oDp6OOhLxQ9JBoUuysVz9UZ9uI6oLUbvAZu0x8o+vE=
|
||||||
github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118/go.mod h1:ZFUnHIVchZ9lJoWoEGUg8Q3M4U8aNNWA3CVSUTkW4og=
|
github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118/go.mod h1:ZFUnHIVchZ9lJoWoEGUg8Q3M4U8aNNWA3CVSUTkW4og=
|
||||||
github.com/mdlayher/netlink v0.0.0-20190313131330-258ea9dff42c/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA=
|
github.com/mdlayher/netlink v0.0.0-20190313131330-258ea9dff42c/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA=
|
||||||
github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA=
|
github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g=
|
||||||
github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M=
|
github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw=
|
||||||
github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY=
|
|
||||||
github.com/mdlayher/netlink v1.1.1/go.mod h1:WTYpFb/WTvlRJAyKhZL5/uy69TDDpHHu2VZmb2XgV7o=
|
|
||||||
github.com/mdlayher/netlink v1.7.1 h1:FdUaT/e33HjEXagwELR8R3/KL1Fq5x3G5jgHLp/BTmg=
|
|
||||||
github.com/mdlayher/netlink v1.7.1/go.mod h1:nKO5CSjE/DJjVhk/TNp6vCE1ktVxEA8VEh8drhZzxsQ=
|
|
||||||
github.com/mdlayher/packet v1.0.0/go.mod h1:eE7/ctqDhoiRhQ44ko5JZU2zxB88g+JH/6jmnjzPjOU=
|
github.com/mdlayher/packet v1.0.0/go.mod h1:eE7/ctqDhoiRhQ44ko5JZU2zxB88g+JH/6jmnjzPjOU=
|
||||||
github.com/mdlayher/packet v1.1.1 h1:7Fv4OEMYqPl7//uBm04VgPpnSNi8fbBZznppgh6WMr8=
|
github.com/mdlayher/packet v1.1.2 h1:3Up1NG6LZrsgDVn6X4L9Ge/iyRyxFEFD9o6Pr3Q1nQY=
|
||||||
github.com/mdlayher/packet v1.1.1/go.mod h1:DRvYY5mH4M4lUqAnMg04E60U4fjUKMZ/4g2cHElZkKo=
|
github.com/mdlayher/packet v1.1.2/go.mod h1:GEu1+n9sG5VtiRE4SydOmX5GTwyyYlteZiFU+x0kew4=
|
||||||
github.com/mdlayher/raw v0.0.0-20190606142536-fef19f00fc18/go.mod h1:7EpbotpCmVZcu+KCX4g9WaRNuu11uyhiW7+Le1dKawg=
|
|
||||||
github.com/mdlayher/raw v0.0.0-20191009151244-50f2db8cc065/go.mod h1:7EpbotpCmVZcu+KCX4g9WaRNuu11uyhiW7+Le1dKawg=
|
|
||||||
github.com/mdlayher/raw v0.1.0 h1:K4PFMVy+AFsp0Zdlrts7yNhxc/uXoPVHi9RzRvtZF2Y=
|
github.com/mdlayher/raw v0.1.0 h1:K4PFMVy+AFsp0Zdlrts7yNhxc/uXoPVHi9RzRvtZF2Y=
|
||||||
github.com/mdlayher/raw v0.1.0/go.mod h1:yXnxvs6c0XoF/aK52/H5PjsVHmWBCFfZUfoh/Y5s9Sg=
|
github.com/mdlayher/raw v0.1.0/go.mod h1:yXnxvs6c0XoF/aK52/H5PjsVHmWBCFfZUfoh/Y5s9Sg=
|
||||||
github.com/mdlayher/socket v0.2.1/go.mod h1:QLlNPkFR88mRUNQIzRBMfXxwKal8H7u1h3bL1CV+f0E=
|
github.com/mdlayher/socket v0.2.1/go.mod h1:QLlNPkFR88mRUNQIzRBMfXxwKal8H7u1h3bL1CV+f0E=
|
||||||
github.com/mdlayher/socket v0.4.0 h1:280wsy40IC9M9q1uPGcLBwXpcTQDtoGwVt+BNoITxIw=
|
github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U=
|
||||||
github.com/mdlayher/socket v0.4.0/go.mod h1:xxFqz5GRCUN3UEOm9CZqEJsAbe1C8OwSK46NlmWuVoc=
|
github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA=
|
||||||
github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
|
github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
|
||||||
github.com/miekg/dns v1.1.53 h1:ZBkuHr5dxHtB1caEOlZTLPo7D3L3TWckgUUs/RHfDxw=
|
github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI=
|
||||||
github.com/miekg/dns v1.1.53/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
|
github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
|
||||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
|
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
|
||||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||||
github.com/onsi/ginkgo/v2 v2.9.2 h1:BA2GMJOtfGAfagzYtrAlufIP0lq6QERkFmHLMLPwFSU=
|
github.com/onsi/ginkgo/v2 v2.10.0 h1:sfUl4qgLdvkChZrWCYndY2EAu9BRIw1YphNAzy1VNWs=
|
||||||
github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts=
|
github.com/onsi/ginkgo/v2 v2.10.0/go.mod h1:UDQOh5wbQUlMnkLfVaIUMtQ1Vus92oM+P2JX1aulgcE=
|
||||||
github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E=
|
github.com/onsi/gomega v1.27.7 h1:fVih9JD6ogIiHUN6ePK7HJidyEDpWGVB5mzM7cWNXoU=
|
||||||
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
|
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
|
||||||
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
|
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
|
||||||
github.com/pierrec/lz4/v4 v4.1.14/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
github.com/pierrec/lz4/v4 v4.1.14/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||||
|
@ -127,12 +108,10 @@ github.com/quic-go/qtls-go1-19 v0.3.2 h1:tFxjCFcTQzK+oMxG6Zcvp4Dq8dx4yD3dDiIiyc8
|
||||||
github.com/quic-go/qtls-go1-19 v0.3.2/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI=
|
github.com/quic-go/qtls-go1-19 v0.3.2/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI=
|
||||||
github.com/quic-go/qtls-go1-20 v0.2.2 h1:WLOPx6OY/hxtTxKV1Zrq20FtXtDEkeY00CGQm8GEa3E=
|
github.com/quic-go/qtls-go1-20 v0.2.2 h1:WLOPx6OY/hxtTxKV1Zrq20FtXtDEkeY00CGQm8GEa3E=
|
||||||
github.com/quic-go/qtls-go1-20 v0.2.2/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM=
|
github.com/quic-go/qtls-go1-20 v0.2.2/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM=
|
||||||
github.com/quic-go/quic-go v0.33.0 h1:ItNoTDN/Fm/zBlq769lLJc8ECe9gYaW40veHCCco7y0=
|
github.com/quic-go/quic-go v0.35.1 h1:b0kzj6b/cQAf05cT0CkQubHM31wiA+xH3IBkxP62poo=
|
||||||
github.com/quic-go/quic-go v0.33.0/go.mod h1:YMuhaAV9/jIu0XclDXwZPAsP/2Kgr5yMYhe9oxhhOFA=
|
github.com/quic-go/quic-go v0.35.1/go.mod h1:+4CVgVppm0FNjpG3UcX8Joi/frKOH7/ciD5yGcwOO1g=
|
||||||
github.com/shirou/gopsutil/v3 v3.21.8 h1:nKct+uP0TV8DjjNiHanKf8SAuub+GNsbrOtM9Nl9biA=
|
github.com/shirou/gopsutil/v3 v3.21.8 h1:nKct+uP0TV8DjjNiHanKf8SAuub+GNsbrOtM9Nl9biA=
|
||||||
github.com/shirou/gopsutil/v3 v3.21.8/go.mod h1:YWp/H8Qs5fVmf17v7JNZzA0mPJ+mS2e9JdiUF9LlKzQ=
|
github.com/shirou/gopsutil/v3 v3.21.8/go.mod h1:YWp/H8Qs5fVmf17v7JNZzA0mPJ+mS2e9JdiUF9LlKzQ=
|
||||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
|
||||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
|
||||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||||
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
|
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
|
||||||
|
@ -152,7 +131,6 @@ github.com/tklauser/go-sysconf v0.3.9 h1:JeUVdAOWhhxVcU6Eqr/ATFHgXk/mmiItdKeJPev
|
||||||
github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs=
|
github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs=
|
||||||
github.com/tklauser/numcpus v0.3.0 h1:ILuRUQBtssgnxw0XXIjKUC56fgnOrFoQQ/4+DeU2biQ=
|
github.com/tklauser/numcpus v0.3.0 h1:ILuRUQBtssgnxw0XXIjKUC56fgnOrFoQQ/4+DeU2biQ=
|
||||||
github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8=
|
github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8=
|
||||||
github.com/u-root/uio v0.0.0-20221213070652-c3537552635f/go.mod h1:IogEAUBXDEwX7oR/BMmCctShYs80ql4hF0ySdzGxf7E=
|
|
||||||
github.com/u-root/uio v0.0.0-20230305220412-3e8cd9d6bf63 h1:YcojQL98T/OO+rybuzn2+5KrD5dBwXIvYBvQ2cD3Avg=
|
github.com/u-root/uio v0.0.0-20230305220412-3e8cd9d6bf63 h1:YcojQL98T/OO+rybuzn2+5KrD5dBwXIvYBvQ2cD3Avg=
|
||||||
github.com/u-root/uio v0.0.0-20230305220412-3e8cd9d6bf63/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264=
|
github.com/u-root/uio v0.0.0-20230305220412-3e8cd9d6bf63/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264=
|
||||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||||
|
@ -160,11 +138,10 @@ go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
|
||||||
go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
|
go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
|
||||||
golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ=
|
golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
|
||||||
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
|
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc=
|
||||||
golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug=
|
golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
|
||||||
golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
|
|
||||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||||
|
@ -172,40 +149,25 @@ golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
|
||||||
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190419010253-1f3472d942ba/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
|
||||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||||
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
|
||||||
golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
|
||||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
|
||||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||||
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
|
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
|
||||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||||
golang.org/x/net v0.0.0-20210929193557-e81a3d93ecf6/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
golang.org/x/net v0.0.0-20210929193557-e81a3d93ecf6/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||||
golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
|
golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
|
||||||
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
|
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
|
||||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190322080309-f49334f85ddc/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190322080309-f49334f85ddc/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20190418153312-f0ce4c0180be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190606122018-79a91cf218c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
|
||||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
|
@ -219,8 +181,8 @@ golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||||
golang.org/x/sys v0.0.0-20220622161953-175b2fd9d664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220622161953-175b2fd9d664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.4.1-0.20230131160137-e7d7f63158de/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.4.1-0.20230131160137-e7d7f63158de/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
|
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
|
||||||
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||||
|
@ -230,12 +192,11 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||||
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
|
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
|
||||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
|
||||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||||
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||||
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
|
golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM=
|
||||||
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
|
golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
|
|
|
@ -72,8 +72,8 @@ func WriteJSONResponse(w http.ResponseWriter, r *http.Request, resp any) (err er
|
||||||
// WriteJSONResponseCode is like [WriteJSONResponse] but adds the ability to
|
// WriteJSONResponseCode is like [WriteJSONResponse] but adds the ability to
|
||||||
// redefine the status code.
|
// redefine the status code.
|
||||||
func WriteJSONResponseCode(w http.ResponseWriter, r *http.Request, code int, resp any) (err error) {
|
func WriteJSONResponseCode(w http.ResponseWriter, r *http.Request, code int, resp any) (err error) {
|
||||||
w.WriteHeader(code)
|
|
||||||
w.Header().Set(httphdr.ContentType, HdrValApplicationJSON)
|
w.Header().Set(httphdr.ContentType, HdrValApplicationJSON)
|
||||||
|
w.WriteHeader(code)
|
||||||
err = json.NewEncoder(w).Encode(resp)
|
err = json.NewEncoder(w).Encode(resp)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
Error(r, w, http.StatusInternalServerError, "encoding resp: %s", err)
|
Error(r, w, http.StatusInternalServerError, "encoding resp: %s", err)
|
||||||
|
|
|
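The hunk above is the fix for the wrong `Content-Type` header in the JSON APIs (#5716): header fields must be set before `WriteHeader`, because net/http commits the status line and header block as soon as `WriteHeader` is called, and any `Header().Set` made afterwards is silently ignored. A minimal standalone sketch of the same ordering rule (not AdGuard Home code; the handler path and version string are illustrative only):

```go
package main

import (
	"encoding/json"
	"net/http"
)

// writeJSON sets headers first, then commits the status code, then streams
// the body; swapping the first two steps would drop the Content-Type header.
func writeJSON(w http.ResponseWriter, code int, resp any) error {
	// Headers are only mutable until the header block has been flushed.
	w.Header().Set("Content-Type", "application/json")

	// Commit the status code and headers.
	w.WriteHeader(code)

	// Finally, write the body.
	return json.NewEncoder(w).Encode(resp)
}

func main() {
	http.HandleFunc("/version.json", func(w http.ResponseWriter, r *http.Request) {
		_ = writeJSON(w, http.StatusOK, map[string]string{"version": "v0.107.30"})
	})
	_ = http.ListenAndServe("127.0.0.1:8080", nil)
}
```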
@ -304,7 +304,7 @@ func tryConn6(req *dhcpv6.Message, c net.PacketConn) (ok, next bool, err error)
|
||||||
if !(response.Type() == dhcpv6.MessageTypeAdvertise &&
|
if !(response.Type() == dhcpv6.MessageTypeAdvertise &&
|
||||||
msg.TransactionID == req.TransactionID &&
|
msg.TransactionID == req.TransactionID &&
|
||||||
rcid != nil &&
|
rcid != nil &&
|
||||||
cid.Equal(*rcid)) {
|
cid.Equal(rcid)) {
|
||||||
|
|
||||||
log.Debug("dhcpv6: received message from server doesn't match our request")
|
log.Debug("dhcpv6: received message from server doesn't match our request")
|
||||||
|
|
||||||
|
|
6
internal/aghos/service.go
Normal file
|
@ -0,0 +1,6 @@
|
||||||
|
package aghos
|
||||||
|
|
||||||
|
// PreCheckActionStart performs the service start action pre-check.
|
||||||
|
func PreCheckActionStart() (err error) {
|
||||||
|
return preCheckActionStart()
|
||||||
|
}
|
32
internal/aghos/service_darwin.go
Normal file
|
@ -0,0 +1,32 @@
|
||||||
|
//go:build darwin
|
||||||
|
|
||||||
|
package aghos
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/AdguardTeam/golibs/log"
|
||||||
|
)
|
||||||
|
|
||||||
|
// preCheckActionStart performs the service start action pre-check. It warns
|
||||||
|
// the user that the service should be installed into the /Applications directory.
|
||||||
|
func preCheckActionStart() (err error) {
|
||||||
|
exe, err := os.Executable()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("getting executable path: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
exe, err = filepath.EvalSymlinks(exe)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("evaluating executable symlinks: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !strings.HasPrefix(exe, "/Applications/") {
|
||||||
|
log.Info("warning: service must be started from within the /Applications directory")
|
||||||
|
}
|
||||||
|
|
||||||
|
return err
|
||||||
|
}
|
8
internal/aghos/service_others.go
Normal file
|
@ -0,0 +1,8 @@
|
||||||
|
//go:build !darwin
|
||||||
|
|
||||||
|
package aghos
|
||||||
|
|
||||||
|
// preCheckActionStart performs the service start action pre-check.
|
||||||
|
func preCheckActionStart() (err error) {
|
||||||
|
return nil
|
||||||
|
}
|
|
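The new aghos files above expose a single exported pre-check, `PreCheckActionStart`, which delegates to a platform-specific implementation selected by build tags: on macOS it warns when the binary is started from outside /Applications, elsewhere it is a no-op. A hypothetical wrapper for the "start" service action, sketching where such a hook slots in (the function names and wiring below are illustrative, not the actual AdGuard Home service code):

```go
package main

import "log"

// startService runs preCheck first and aborts the start action on error.
// Warnings (like the /Applications hint on macOS) are expected to be logged
// by the pre-check itself and do not abort the start.
func startService(preCheck func() error, start func() error) error {
	if err := preCheck(); err != nil {
		return err
	}

	return start()
}

func main() {
	err := startService(
		func() error { log.Print("pre-check: ok"); return nil },
		func() error { log.Print("starting service"); return nil },
	)
	if err != nil {
		log.Fatal(err)
	}
}
```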
@ -1,46 +1,60 @@
# Testing DHCP Server

Contents:
* [Test setup with Virtual Box](#vbox)
* [Quick test with DHCPTest](#dhcptest)

## <a href="#vbox" id="vbox" name="vbox">Test setup with Virtual Box</a>

### Prerequisites

To set up a test environment for the DHCP server you will need:

* Linux AG Home host machine (Virtual).
* Virtual Box.
* Virtual machine (guest OS doesn't matter).

### Configure Virtual Box

1. Install Virtual Box and run the following command to create a Host-Only
   network:

   ```sh
   $ VBoxManage hostonlyif create
   ```

   You can check its status with the `ip a` command.

   You can also set up the Host-Only network using the Virtual Box menu:

   ```
   File -> Host Network Manager...
   ```

2. Create your virtual machine and set up its network:

   ```
   VM Settings -> Network -> Host-only Adapter
   ```

3. Start your VM and install an OS.  Configure its network interface to use
   DHCP, and the OS should ask for an IP address from our DHCP server.

4. To see the current IP addresses on the client OS you can use the `ip a`
   command on Linux or `ipconfig` on Windows.

5. To force the client OS to request an IP from the DHCP server again, you
   can use `dhclient` on Linux or `ipconfig /release` on Windows.

### Configure server

1. Edit the server configuration file `AdGuardHome.yaml`, for example:

   ```yaml
   dhcp:
     enabled: true
     interface_name: vboxnet0
     local_domain_name: lan
     dhcpv4:
       gateway_ip: 192.168.56.1
       subnet_mask: 255.255.255.0

@ -54,11 +68,29 @@ To set up a test environment for DHCP server you need:

       lease_duration: 86400
     ra_slaac_only: false
     ra_allow_slaac: false
   ```

2. Start the server:

   ```sh
   ./AdGuardHome -v
   ```

   There should be a message in the log which shows that the DHCP server is
   ready:

   ```
   [info] DHCP: listening on 0.0.0.0:67
   ```

## <a href="#dhcptest" id="dhcptest" name="dhcptest">Quick test with DHCPTest utility</a>

### Prerequisites

* [DHCP test utility][dhcptest-gh].

### Quick test

The DHCP server can be tested for DISCOVER-OFFER exchanges in the utility's
interactive mode.

[dhcptest-gh]: https://github.com/CyberShadow/dhcptest
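As an alternative to the interactive DHCPTest run described above, a DISCOVER-OFFER smoke test can be scripted with the github.com/insomniacslk/dhcp module that this repository already depends on. This is a sketch, not part of the repository; the interface name is an assumption, root privileges are required, and the `nclient4` API may differ slightly between module versions:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/insomniacslk/dhcp/dhcpv4/nclient4"
)

func main() {
	// Use the interface attached to the Host-Only network, e.g. vboxnet0.
	client, err := nclient4.New("vboxnet0")
	if err != nil {
		log.Fatalf("creating dhcp client: %s", err)
	}
	defer func() { _ = client.Close() }()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Broadcast a DISCOVER and wait for the first OFFER.
	offer, err := client.DiscoverOffer(ctx)
	if err != nil {
		log.Fatalf("no offer received: %s", err)
	}

	log.Printf("got offer: %s from %s", offer.YourIPAddr, offer.ServerIdentifier())
}
```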
|
@ -1,4 +1,4 @@
|
||||||
//go:build darwin
|
//go:build darwin || freebsd || openbsd
|
||||||
|
|
||||||
package dhcpd
|
package dhcpd
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
//go:build darwin
|
//go:build darwin || freebsd || openbsd
|
||||||
|
|
||||||
package dhcpd
|
package dhcpd
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
//go:build freebsd || linux || openbsd
|
//go:build linux
|
||||||
|
|
||||||
package dhcpd
|
package dhcpd
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
//go:build freebsd || linux || openbsd
|
//go:build linux
|
||||||
|
|
||||||
package dhcpd
|
package dhcpd
|
||||||
|
|
|
@ -239,36 +239,16 @@ func Create(conf *ServerConfig) (s *server, err error) {
|
||||||
// [aghhttp.RegisterFunc].
|
// [aghhttp.RegisterFunc].
|
||||||
s.registerHandlers()
|
s.registerHandlers()
|
||||||
|
|
||||||
v4conf := conf.Conf4
|
v4Enabled, v6Enabled, err := s.setServers(conf)
|
||||||
v4conf.InterfaceName = s.conf.InterfaceName
|
|
||||||
v4conf.notify = s.onNotify
|
|
||||||
v4conf.Enabled = s.conf.Enabled && v4conf.RangeStart.IsValid()
|
|
||||||
|
|
||||||
s.srv4, err = v4Create(&v4conf)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if v4conf.Enabled {
|
// Don't wrap the error, because it's informative enough as is.
|
||||||
return nil, fmt.Errorf("creating dhcpv4 srv: %w", err)
|
return nil, err
|
||||||
}
|
|
||||||
|
|
||||||
log.Debug("dhcpd: warning: creating dhcpv4 srv: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
v6conf := conf.Conf6
|
|
||||||
v6conf.Enabled = s.conf.Enabled
|
|
||||||
if len(v6conf.RangeStart) == 0 {
|
|
||||||
v6conf.Enabled = false
|
|
||||||
}
|
|
||||||
v6conf.InterfaceName = s.conf.InterfaceName
|
|
||||||
v6conf.notify = s.onNotify
|
|
||||||
s.srv6, err = v6Create(v6conf)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("creating dhcpv6 srv: %w", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
s.conf.Conf4 = conf.Conf4
|
s.conf.Conf4 = conf.Conf4
|
||||||
s.conf.Conf6 = conf.Conf6
|
s.conf.Conf6 = conf.Conf6
|
||||||
|
|
||||||
if s.conf.Enabled && !v4conf.Enabled && !v6conf.Enabled {
|
if s.conf.Enabled && !v4Enabled && !v6Enabled {
|
||||||
return nil, fmt.Errorf("neither dhcpv4 nor dhcpv6 srv is configured")
|
return nil, fmt.Errorf("neither dhcpv4 nor dhcpv6 srv is configured")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -289,6 +269,39 @@ func Create(conf *ServerConfig) (s *server, err error) {
|
||||||
return s, nil
|
return s, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// setServers updates DHCPv4 and DHCPv6 servers created from the provided
|
||||||
|
// configuration conf.
|
||||||
|
func (s *server) setServers(conf *ServerConfig) (v4Enabled, v6Enabled bool, err error) {
|
||||||
|
v4conf := conf.Conf4
|
||||||
|
v4conf.InterfaceName = s.conf.InterfaceName
|
||||||
|
v4conf.notify = s.onNotify
|
||||||
|
v4conf.Enabled = s.conf.Enabled && v4conf.RangeStart.IsValid()
|
||||||
|
|
||||||
|
s.srv4, err = v4Create(&v4conf)
|
||||||
|
if err != nil {
|
||||||
|
if v4conf.Enabled {
|
||||||
|
return true, false, fmt.Errorf("creating dhcpv4 srv: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debug("dhcpd: warning: creating dhcpv4 srv: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
v6conf := conf.Conf6
|
||||||
|
v6conf.InterfaceName = s.conf.InterfaceName
|
||||||
|
v6conf.notify = s.onNotify
|
||||||
|
v6conf.Enabled = s.conf.Enabled
|
||||||
|
if len(v6conf.RangeStart) == 0 {
|
||||||
|
v6conf.Enabled = false
|
||||||
|
}
|
||||||
|
|
||||||
|
s.srv6, err = v6Create(v6conf)
|
||||||
|
if err != nil {
|
||||||
|
return v4conf.Enabled, v6conf.Enabled, fmt.Errorf("creating dhcpv6 srv: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return v4conf.Enabled, v6conf.Enabled, nil
|
||||||
|
}
|
||||||
|
|
||||||
// Enabled returns true when the server is enabled.
|
// Enabled returns true when the server is enabled.
|
||||||
func (s *server) Enabled() (ok bool) {
|
func (s *server) Enabled() (ok bool) {
|
||||||
return s.conf.Enabled
|
return s.conf.Enabled
|
||||||
|
|
|
@ -16,6 +16,7 @@ import (
|
||||||
"github.com/AdguardTeam/AdGuardHome/internal/aghnet"
|
"github.com/AdguardTeam/AdGuardHome/internal/aghnet"
|
||||||
"github.com/AdguardTeam/golibs/errors"
|
"github.com/AdguardTeam/golibs/errors"
|
||||||
"github.com/AdguardTeam/golibs/log"
|
"github.com/AdguardTeam/golibs/log"
|
||||||
|
"github.com/AdguardTeam/golibs/netutil"
|
||||||
)
|
)
|
||||||
|
|
||||||
type v4ServerConfJSON struct {
|
type v4ServerConfJSON struct {
|
||||||
|
@ -263,6 +264,28 @@ func (s *server) handleDHCPSetConfigV6(
|
||||||
return srv6, enabled, err
|
return srv6, enabled, err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// createServers returns DHCPv4 and DHCPv6 servers created from the provided
|
||||||
|
// configuration conf.
|
||||||
|
func (s *server) createServers(conf *dhcpServerConfigJSON) (srv4, srv6 DHCPServer, err error) {
|
||||||
|
srv4, v4Enabled, err := s.handleDHCPSetConfigV4(conf)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("bad dhcpv4 configuration: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
srv6, v6Enabled, err := s.handleDHCPSetConfigV6(conf)
|
||||||
|
if err != nil {
|
||||||
|
return nil, nil, fmt.Errorf("bad dhcpv6 configuration: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if conf.Enabled == aghalg.NBTrue && !v4Enabled && !v6Enabled {
|
||||||
|
return nil, nil, fmt.Errorf("dhcpv4 or dhcpv6 configuration must be complete")
|
||||||
|
}
|
||||||
|
|
||||||
|
return srv4, srv6, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleDHCPSetConfig is the handler for the POST /control/dhcp/set_config
|
||||||
|
// HTTP API.
|
||||||
func (s *server) handleDHCPSetConfig(w http.ResponseWriter, r *http.Request) {
|
func (s *server) handleDHCPSetConfig(w http.ResponseWriter, r *http.Request) {
|
||||||
conf := &dhcpServerConfigJSON{}
|
conf := &dhcpServerConfigJSON{}
|
||||||
conf.Enabled = aghalg.BoolToNullBool(s.conf.Enabled)
|
conf.Enabled = aghalg.BoolToNullBool(s.conf.Enabled)
|
||||||
|
@ -275,22 +298,9 @@ func (s *server) handleDHCPSetConfig(w http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
srv4, v4Enabled, err := s.handleDHCPSetConfigV4(conf)
|
srv4, srv6, err := s.createServers(conf)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
aghhttp.Error(r, w, http.StatusBadRequest, "bad dhcpv4 configuration: %s", err)
|
aghhttp.Error(r, w, http.StatusBadRequest, "%s", err)
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
srv6, v6Enabled, err := s.handleDHCPSetConfigV6(conf)
|
|
||||||
if err != nil {
|
|
||||||
aghhttp.Error(r, w, http.StatusBadRequest, "bad dhcpv6 configuration: %s", err)
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if conf.Enabled == aghalg.NBTrue && !v4Enabled && !v6Enabled {
|
|
||||||
aghhttp.Error(r, w, http.StatusBadRequest, "dhcpv4 or dhcpv6 configuration must be complete")
|
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
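The refactor above folds the two per-protocol validation steps into `createServers`, so `handleDHCPSetConfig` now reports a single aggregated error for a bad DHCPv4 or DHCPv6 configuration. A hypothetical client call against this endpoint is sketched below; the JSON field names mirror the `dhcpServerConfigJSON` and `v4ServerConfJSON` structures as I recall them, but verify them against the openapi spec, and the listen address and credentials are assumptions:

```go
package main

import (
	"bytes"
	"fmt"
	"log"
	"net/http"
)

func main() {
	body := []byte(`{
		"enabled": true,
		"interface_name": "vboxnet0",
		"v4": {
			"gateway_ip": "192.168.56.1",
			"subnet_mask": "255.255.255.0",
			"range_start": "192.168.56.100",
			"range_end": "192.168.56.200",
			"lease_duration": 86400
		}
	}`)

	req, err := http.NewRequest(
		http.MethodPost,
		"http://127.0.0.1:3000/control/dhcp/set_config",
		bytes.NewReader(body),
	)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.SetBasicAuth("admin", "password")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer func() { _ = resp.Body.Close() }()

	// A non-2xx status carries the aggregated "bad dhcpv4/dhcpv6
	// configuration" error produced by createServers.
	fmt.Println(resp.Status)
}
```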
@ -350,8 +360,10 @@ type netInterfaceJSON struct {
|
||||||
Addrs6 []netip.Addr `json:"ipv6_addresses"`
|
Addrs6 []netip.Addr `json:"ipv6_addresses"`
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// handleDHCPInterfaces is the handler for the GET /control/dhcp/interfaces
|
||||||
|
// HTTP API.
|
||||||
func (s *server) handleDHCPInterfaces(w http.ResponseWriter, r *http.Request) {
|
func (s *server) handleDHCPInterfaces(w http.ResponseWriter, r *http.Request) {
|
||||||
response := map[string]netInterfaceJSON{}
|
resp := map[string]*netInterfaceJSON{}
|
||||||
|
|
||||||
ifaces, err := net.Interfaces()
|
ifaces, err := net.Interfaces()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@ -362,82 +374,86 @@ func (s *server) handleDHCPInterfaces(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
for _, iface := range ifaces {
|
for _, iface := range ifaces {
|
||||||
if iface.Flags&net.FlagLoopback != 0 {
|
if iface.Flags&net.FlagLoopback != 0 {
|
||||||
// it's a loopback, skip it
|
// It's a loopback, skip it.
|
||||||
continue
|
|
||||||
}
|
|
||||||
if iface.Flags&net.FlagBroadcast == 0 {
|
|
||||||
// this interface doesn't support broadcast, skip it
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
var addrs []net.Addr
|
if iface.Flags&net.FlagBroadcast == 0 {
|
||||||
addrs, err = iface.Addrs()
|
// This interface doesn't support broadcast, skip it.
|
||||||
if err != nil {
|
continue
|
||||||
aghhttp.Error(
|
}
|
||||||
r,
|
|
||||||
w,
|
jsonIface, iErr := newNetInterfaceJSON(iface)
|
||||||
http.StatusInternalServerError,
|
if iErr != nil {
|
||||||
"Failed to get addresses for interface %s: %s",
|
aghhttp.Error(r, w, http.StatusInternalServerError, "%s", iErr)
|
||||||
iface.Name,
|
|
||||||
err,
|
|
||||||
)
|
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
jsonIface := netInterfaceJSON{
|
if jsonIface != nil {
|
||||||
Name: iface.Name,
|
resp[iface.Name] = jsonIface
|
||||||
HardwareAddr: iface.HardwareAddr.String(),
|
|
||||||
}
|
|
||||||
|
|
||||||
if iface.Flags != 0 {
|
|
||||||
jsonIface.Flags = iface.Flags.String()
|
|
||||||
}
|
|
||||||
// we don't want link-local addresses in json, so skip them
|
|
||||||
for _, addr := range addrs {
|
|
||||||
ipnet, ok := addr.(*net.IPNet)
|
|
||||||
if !ok {
|
|
||||||
// not an IPNet, should not happen
|
|
||||||
aghhttp.Error(
|
|
||||||
r,
|
|
||||||
w,
|
|
||||||
http.StatusInternalServerError,
|
|
||||||
"got iface.Addrs() element %[1]s that is not net.IPNet, it is %[1]T",
|
|
||||||
addr)
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// ignore link-local
|
|
||||||
//
|
|
||||||
// TODO(e.burkov): Try to listen DHCP on LLA as well.
|
|
||||||
if ipnet.IP.IsLinkLocalUnicast() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if ip4 := ipnet.IP.To4(); ip4 != nil {
|
|
||||||
addr := netip.AddrFrom4(*(*[4]byte)(ip4))
|
|
||||||
jsonIface.Addrs4 = append(jsonIface.Addrs4, addr)
|
|
||||||
} else {
|
|
||||||
addr := netip.AddrFrom16(*(*[16]byte)(ipnet.IP))
|
|
||||||
jsonIface.Addrs6 = append(jsonIface.Addrs6, addr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(jsonIface.Addrs4)+len(jsonIface.Addrs6) != 0 {
|
|
||||||
jsonIface.GatewayIP = aghnet.GatewayIP(iface.Name)
|
|
||||||
response[iface.Name] = jsonIface
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
err = json.NewEncoder(w).Encode(response)
|
_ = aghhttp.WriteJSONResponse(w, r, resp)
|
||||||
|
}
|
||||||
|
|
||||||
|
// newNetInterfaceJSON creates a JSON object from a [net.Interface] iface.
|
||||||
|
func newNetInterfaceJSON(iface net.Interface) (out *netInterfaceJSON, err error) {
|
||||||
|
addrs, err := iface.Addrs()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
aghhttp.Error(
|
return nil, fmt.Errorf(
|
||||||
r,
|
"failed to get addresses for interface %s: %s",
|
||||||
w,
|
iface.Name,
|
||||||
http.StatusInternalServerError,
|
|
||||||
"Failed to marshal json with available interfaces: %s",
|
|
||||||
err,
|
err,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
out = &netInterfaceJSON{
|
||||||
|
Name: iface.Name,
|
||||||
|
HardwareAddr: iface.HardwareAddr.String(),
|
||||||
|
}
|
||||||
|
|
||||||
|
if iface.Flags != 0 {
|
||||||
|
out.Flags = iface.Flags.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// We don't want link-local addresses in JSON, so skip them.
|
||||||
|
for _, addr := range addrs {
|
||||||
|
ipNet, ok := addr.(*net.IPNet)
|
||||||
|
if !ok {
|
||||||
|
// Not an IPNet, should not happen.
|
||||||
|
return nil, fmt.Errorf("got iface.Addrs() element %[1]s that is not"+
|
||||||
|
" net.IPNet, it is %[1]T", addr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ignore link-local.
|
||||||
|
//
|
||||||
|
// TODO(e.burkov): Try to listen DHCP on LLA as well.
|
||||||
|
if ipNet.IP.IsLinkLocalUnicast() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
vAddr, iErr := netutil.IPToAddrNoMapped(ipNet.IP)
|
||||||
|
if iErr != nil {
|
||||||
|
// Conversion of a valid interface address failed, should not happen.
|
||||||
|
return nil, fmt.Errorf("failed to convert IP address %[1]s: %w", addr, iErr)
|
||||||
|
}
|
||||||
|
|
||||||
|
if vAddr.Is4() {
|
||||||
|
out.Addrs4 = append(out.Addrs4, vAddr)
|
||||||
|
} else {
|
||||||
|
out.Addrs6 = append(out.Addrs6, vAddr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(out.Addrs4)+len(out.Addrs6) == 0 {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
out.GatewayIP = aghnet.GatewayIP(iface.Name)
|
||||||
|
|
||||||
|
return out, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// dhcpSearchOtherResult contains information about other DHCP server for
|
// dhcpSearchOtherResult contains information about other DHCP server for
|
||||||
|
|
|
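The rewritten handler above now returns a map of `netInterfaceJSON` objects through `aghhttp.WriteJSONResponse`, with link-local addresses filtered out. A small mirror of that structure, only for illustrating the response shape of `GET /control/dhcp/interfaces`; note that only the `ipv6_addresses` tag is visible in the hunk, so the remaining JSON tag names below are assumptions:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/netip"
)

// netInterfaceJSON mirrors the structure used by the handler above.
type netInterfaceJSON struct {
	Name         string       `json:"name"`
	HardwareAddr string       `json:"hardware_address"`
	Flags        string       `json:"flags"`
	GatewayIP    netip.Addr   `json:"gateway_ip"`
	Addrs4       []netip.Addr `json:"ipv4_addresses"`
	Addrs6       []netip.Addr `json:"ipv6_addresses"`
}

func main() {
	resp := map[string]*netInterfaceJSON{
		"vboxnet0": {
			Name:         "vboxnet0",
			HardwareAddr: "0a:00:27:00:00:00",
			Flags:        "up|broadcast|multicast",
			GatewayIP:    netip.MustParseAddr("192.168.56.1"),
			Addrs4:       []netip.Addr{netip.MustParseAddr("192.168.56.1")},
		},
	}

	data, err := json.MarshalIndent(resp, "", "  ")
	if err != nil {
		panic(err)
	}

	fmt.Println(string(data))
}
```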
@ -7,6 +7,7 @@ import (
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/AdguardTeam/golibs/errors"
|
||||||
"github.com/AdguardTeam/golibs/log"
|
"github.com/AdguardTeam/golibs/log"
|
||||||
"github.com/AdguardTeam/golibs/netutil"
|
"github.com/AdguardTeam/golibs/netutil"
|
||||||
"golang.org/x/net/icmp"
|
"golang.org/x/net/icmp"
|
||||||
|
@ -195,7 +196,7 @@ func createICMPv6RAPacket(params icmpv6RA) (data []byte, err error) {
|
||||||
return data, nil
|
return data, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// Init - initialize RA module
|
// Init initializes RA module.
|
||||||
func (ra *raCtx) Init() (err error) {
|
func (ra *raCtx) Init() (err error) {
|
||||||
ra.stop.Store(0)
|
ra.stop.Store(0)
|
||||||
ra.conn = nil
|
ra.conn = nil
|
||||||
|
@ -203,8 +204,7 @@ func (ra *raCtx) Init() (err error) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Debug("dhcpv6 ra: source IP address: %s DNS IP address: %s",
|
log.Debug("dhcpv6 ra: source IP address: %s DNS IP address: %s", ra.ipAddr, ra.dnsIPAddr)
|
||||||
ra.ipAddr, ra.dnsIPAddr)
|
|
||||||
|
|
||||||
params := icmpv6RA{
|
params := icmpv6RA{
|
||||||
managedAddressConfiguration: !ra.raSLAACOnly,
|
managedAddressConfiguration: !ra.raSLAACOnly,
|
||||||
|
@ -223,18 +223,15 @@ func (ra *raCtx) Init() (err error) {
|
||||||
return fmt.Errorf("creating packet: %w", err)
|
return fmt.Errorf("creating packet: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
success := false
|
|
||||||
ipAndScope := ra.ipAddr.String() + "%" + ra.ifaceName
|
ipAndScope := ra.ipAddr.String() + "%" + ra.ifaceName
|
||||||
ra.conn, err = icmp.ListenPacket("ip6:ipv6-icmp", ipAndScope)
|
ra.conn, err = icmp.ListenPacket("ip6:ipv6-icmp", ipAndScope)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("dhcpv6 ra: icmp.ListenPacket: %w", err)
|
return fmt.Errorf("dhcpv6 ra: icmp.ListenPacket: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
defer func() {
|
defer func() {
|
||||||
if !success {
|
if err != nil {
|
||||||
derr := ra.Close()
|
err = errors.WithDeferred(err, ra.Close())
|
||||||
if derr != nil {
|
|
||||||
log.Error("closing context: %s", derr)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
|
@ -269,7 +266,6 @@ func (ra *raCtx) Init() (err error) {
|
||||||
log.Debug("dhcpv6 ra: loop exit")
|
log.Debug("dhcpv6 ra: loop exit")
|
||||||
}()
|
}()
|
||||||
|
|
||||||
success = true
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
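The `Init` hunk above drops the `success` flag in favor of the deferred-close idiom from AdGuard's golibs: on an error path the resource is closed and the close error is merged into the returned one with `errors.WithDeferred`. A simplified, self-contained sketch of the general pattern (file name and setup code are illustrative; the hunk itself only closes on error, while the common form below merges unconditionally):

```go
package main

import (
	"log"
	"os"

	"github.com/AdguardTeam/golibs/errors"
)

// writeGreeting demonstrates merging a deferred Close error into the
// function's returned error instead of tracking a separate success flag.
func writeGreeting(name string) (err error) {
	f, err := os.Create(name)
	if err != nil {
		return err
	}

	defer func() {
		// WithDeferred keeps both the original error and the Close error.
		err = errors.WithDeferred(err, f.Close())
	}()

	_, err = f.WriteString("hello\n")

	return err
}

func main() {
	if err := writeGreeting("/tmp/greeting.txt"); err != nil {
		log.Fatal(err)
	}
}
```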
@ -342,8 +342,8 @@ func (s *v4Server) rmLease(lease *Lease) (err error) {
|
||||||
// server to be configured and it's not.
|
// server to be configured and it's not.
|
||||||
const ErrUnconfigured errors.Error = "server is unconfigured"
|
const ErrUnconfigured errors.Error = "server is unconfigured"
|
||||||
|
|
||||||
// AddStaticLease implements the DHCPServer interface for *v4Server. It is safe
|
// AddStaticLease implements the DHCPServer interface for *v4Server. It is
|
||||||
// for concurrent use.
|
// safe for concurrent use.
|
||||||
func (s *v4Server) AddStaticLease(l *Lease) (err error) {
|
func (s *v4Server) AddStaticLease(l *Lease) (err error) {
|
||||||
defer func() { err = errors.Annotate(err, "dhcpv4: adding static lease: %w") }()
|
defer func() { err = errors.Annotate(err, "dhcpv4: adding static lease: %w") }()
|
||||||
|
|
||||||
|
@ -354,21 +354,23 @@ func (s *v4Server) AddStaticLease(l *Lease) (err error) {
|
||||||
l.IP = l.IP.Unmap()
|
l.IP = l.IP.Unmap()
|
||||||
|
|
||||||
if !l.IP.Is4() {
|
if !l.IP.Is4() {
|
||||||
return fmt.Errorf("invalid ip %q, only ipv4 is supported", l.IP)
|
return fmt.Errorf("invalid IP %q: only IPv4 is supported", l.IP)
|
||||||
} else if gwIP := s.conf.GatewayIP; gwIP == l.IP {
|
} else if gwIP := s.conf.GatewayIP; gwIP == l.IP {
|
||||||
return fmt.Errorf("can't assign the gateway IP %s to the lease", gwIP)
|
return fmt.Errorf("can't assign the gateway IP %q to the lease", gwIP)
|
||||||
}
|
}
|
||||||
|
|
||||||
l.IsStatic = true
|
l.IsStatic = true
|
||||||
|
|
||||||
err = netutil.ValidateMAC(l.HWAddr)
|
err = netutil.ValidateMAC(l.HWAddr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
// Don't wrap the error, because it's informative enough as is.
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if hostname := l.Hostname; hostname != "" {
|
if hostname := l.Hostname; hostname != "" {
|
||||||
hostname, err = normalizeHostname(hostname)
|
hostname, err = normalizeHostname(hostname)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
// Don't wrap the error, because it's informative enough as is.
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -386,32 +388,9 @@ func (s *v4Server) AddStaticLease(l *Lease) (err error) {
|
||||||
l.Hostname = hostname
|
l.Hostname = hostname
|
||||||
}
|
}
|
||||||
|
|
||||||
// Perform the following actions in an anonymous function to make sure
|
err = s.updateStaticLease(l)
|
||||||
// that the lock gets unlocked before the notification step.
|
|
||||||
func() {
|
|
||||||
s.leasesLock.Lock()
|
|
||||||
defer s.leasesLock.Unlock()
|
|
||||||
|
|
||||||
err = s.rmDynamicLease(l)
|
|
||||||
if err != nil {
|
|
||||||
err = fmt.Errorf(
|
|
||||||
"removing dynamic leases for %s (%s): %w",
|
|
||||||
l.IP,
|
|
||||||
l.HWAddr,
|
|
||||||
err,
|
|
||||||
)
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
err = s.addLease(l)
|
|
||||||
if err != nil {
|
|
||||||
err = fmt.Errorf("adding static lease for %s (%s): %w", l.IP, l.HWAddr, err)
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
// Don't wrap the error, because it's informative enough as is.
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -421,6 +400,25 @@ func (s *v4Server) AddStaticLease(l *Lease) (err error) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// updateStaticLease safely removes the dynamic lease with the same properties and
|
||||||
|
// then adds a static lease l.
|
||||||
|
func (s *v4Server) updateStaticLease(l *Lease) (err error) {
|
||||||
|
s.leasesLock.Lock()
|
||||||
|
defer s.leasesLock.Unlock()
|
||||||
|
|
||||||
|
err = s.rmDynamicLease(l)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("removing dynamic leases for %s (%s): %w", l.IP, l.HWAddr, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = s.addLease(l)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("adding static lease for %s (%s): %w", l.IP, l.HWAddr, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// RemoveStaticLease removes a static lease. It is safe for concurrent use.
|
// RemoveStaticLease removes a static lease. It is safe for concurrent use.
|
||||||
func (s *v4Server) RemoveStaticLease(l *Lease) (err error) {
|
func (s *v4Server) RemoveStaticLease(l *Lease) (err error) {
|
||||||
defer func() { err = errors.Annotate(err, "dhcpv4: %w") }()
|
defer func() { err = errors.Annotate(err, "dhcpv4: %w") }()
|
||||||
|
@@ -894,24 +892,9 @@ func (s *v4Server) handleDecline(req, resp *dhcpv4.DHCPv4) (err error) {
 		reqIP = req.ClientIPAddr
 	}

-	netIP, ok := netip.AddrFromSlice(reqIP)
-	if !ok {
-		log.Info("dhcpv4: invalid IP: %s", reqIP)
-
-		return nil
-	}
-
-	var oldLease *Lease
-	for _, l := range s.leases {
-		if bytes.Equal(l.HWAddr, mac) && l.IP == netIP {
-			oldLease = l
-
-			break
-		}
-	}
-
+	oldLease := s.findLeaseForIP(reqIP, mac)
 	if oldLease == nil {
-		log.Info("dhcpv4: lease with ip %s for %s not found", reqIP, mac)
+		log.Info("dhcpv4: lease with IP %s for %s not found", reqIP, mac)

 		return nil
 	}

@@ -925,7 +908,7 @@ func (s *v4Server) handleDecline(req, resp *dhcpv4.DHCPv4) (err error) {
 	if err != nil {
 		return fmt.Errorf("allocating new lease for %s: %w", mac, err)
 	} else if newLease == nil {
-		log.Info("dhcpv4: allocating new lease for %s: no more ip addresses", mac)
+		log.Info("dhcpv4: allocating new lease for %s: no more IP addresses", mac)

 		resp.YourIPAddr = make([]byte, 4)
 		resp.UpdateOption(dhcpv4.OptMessageType(dhcpv4.MessageTypeAck))

@@ -941,15 +924,32 @@ func (s *v4Server) handleDecline(req, resp *dhcpv4.DHCPv4) (err error) {
 		return fmt.Errorf("adding new lease for %s: %w", mac, err)
 	}

-	log.Info("dhcpv4: changed ip from %s to %s for %s", reqIP, newLease.IP, mac)
+	log.Info("dhcpv4: changed IP from %s to %s for %s", reqIP, newLease.IP, mac)

-	resp.YourIPAddr = net.IP(newLease.IP.AsSlice())
-
+	resp.YourIPAddr = newLease.IP.AsSlice()
 	resp.UpdateOption(dhcpv4.OptMessageType(dhcpv4.MessageTypeAck))

 	return nil
 }

+// findLeaseForIP returns a lease for provided ip and mac.
+func (s *v4Server) findLeaseForIP(ip net.IP, mac net.HardwareAddr) (l *Lease) {
+	netIP, ok := netip.AddrFromSlice(ip)
+	if !ok {
+		log.Info("dhcpv4: invalid IP: %s", ip)
+
+		return nil
+	}
+
+	for _, il := range s.leases {
+		if bytes.Equal(il.HWAddr, mac) && il.IP == netIP {
+			return il
+		}
+	}
+
+	return nil
+}
+
 // handleRelease is the handler for the DHCP Release request.
 func (s *v4Server) handleRelease(req, resp *dhcpv4.DHCPv4) (err error) {
 	mac := req.ClientHWAddr
@@ -995,11 +995,80 @@ func (s *v4Server) handleRelease(req, resp *dhcpv4.DHCPv4) (err error) {
 	return nil
 }

-// Find a lease associated with MAC and prepare response
-// Return 1: OK
-// Return 0: error; reply with Nak
-// Return -1: error; don't reply
-func (s *v4Server) handle(req, resp *dhcpv4.DHCPv4) int {
+// messageHandler describes a DHCPv4 message handler function.
+type messageHandler func(s *v4Server, req, resp *dhcpv4.DHCPv4) (rCode int, l *Lease, err error)
+
+// messageHandlers is a map of handlers for various messages with message types
+// keys.
+var messageHandlers = map[dhcpv4.MessageType]messageHandler{
+	dhcpv4.MessageTypeDiscover: func(
+		s *v4Server,
+		req *dhcpv4.DHCPv4,
+		resp *dhcpv4.DHCPv4,
+	) (rCode int, l *Lease, err error) {
+		l, err = s.handleDiscover(req, resp)
+		if err != nil {
+			return 0, nil, fmt.Errorf("handling discover: %s", err)
+		}
+
+		if l == nil {
+			return 0, nil, nil
+		}
+
+		return 1, l, nil
+	},
+	dhcpv4.MessageTypeRequest: func(
+		s *v4Server,
+		req *dhcpv4.DHCPv4,
+		resp *dhcpv4.DHCPv4,
+	) (rCode int, l *Lease, err error) {
+		var toReply bool
+		l, toReply = s.handleRequest(req, resp)
+		if l == nil {
+			if toReply {
+				return 0, nil, nil
+			}
+
+			// Drop the packet.
+			return -1, nil, nil
+		}
+
+		return 1, l, nil
+	},
+	dhcpv4.MessageTypeDecline: func(
+		s *v4Server,
+		req *dhcpv4.DHCPv4,
+		resp *dhcpv4.DHCPv4,
+	) (rCode int, l *Lease, err error) {
+		err = s.handleDecline(req, resp)
+		if err != nil {
+			return 0, nil, fmt.Errorf("handling decline: %s", err)
+		}
+
+		return 1, nil, nil
+	},
+	dhcpv4.MessageTypeRelease: func(
+		s *v4Server,
+		req *dhcpv4.DHCPv4,
+		resp *dhcpv4.DHCPv4,
+	) (rCode int, l *Lease, err error) {
+		err = s.handleRelease(req, resp)
+		if err != nil {
+			return 0, nil, fmt.Errorf("handling release: %s", err)
+		}
+
+		return 1, nil, nil
+	},
+}
+
+// handle processes request, it finds a lease associated with MAC address and
+// prepares response.
+//
+// Possible return values are:
+//   - "1": OK,
+//   - "0": error, reply with Nak,
+//   - "-1": error, don't reply.
+func (s *v4Server) handle(req, resp *dhcpv4.DHCPv4) (rCode int) {
var err error
|
var err error
|
||||||
|
|
||||||
// Include server's identifier option since any reply should contain it.
|
// Include server's identifier option since any reply should contain it.
|
||||||
|
@ -1007,47 +1076,26 @@ func (s *v4Server) handle(req, resp *dhcpv4.DHCPv4) int {
|
||||||
// See https://datatracker.ietf.org/doc/html/rfc2131#page-29.
|
// See https://datatracker.ietf.org/doc/html/rfc2131#page-29.
|
||||||
resp.UpdateOption(dhcpv4.OptServerIdentifier(s.conf.dnsIPAddrs[0].AsSlice()))
|
resp.UpdateOption(dhcpv4.OptServerIdentifier(s.conf.dnsIPAddrs[0].AsSlice()))
|
||||||
|
|
||||||
// TODO(a.garipov): Refactor this into handlers.
|
handler := messageHandlers[req.MessageType()]
|
||||||
var l *Lease
|
if handler == nil {
|
||||||
switch mt := req.MessageType(); mt {
|
s.updateOptions(req, resp)
|
||||||
case dhcpv4.MessageTypeDiscover:
|
|
||||||
l, err = s.handleDiscover(req, resp)
|
|
||||||
if err != nil {
|
|
||||||
log.Error("dhcpv4: handling discover: %s", err)
|
|
||||||
|
|
||||||
return 0
|
return 1
|
||||||
}
|
}
|
||||||
|
|
||||||
if l == nil {
|
rCode, l, err := handler(s, req, resp)
|
||||||
return 0
|
if err != nil {
|
||||||
}
|
log.Error("dhcpv4: %s", err)
|
||||||
case dhcpv4.MessageTypeRequest:
|
|
||||||
var toReply bool
|
|
||||||
l, toReply = s.handleRequest(req, resp)
|
|
||||||
if l == nil {
|
|
||||||
if toReply {
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
return -1 // drop packet
|
|
||||||
}
|
|
||||||
case dhcpv4.MessageTypeDecline:
|
|
||||||
err = s.handleDecline(req, resp)
|
|
||||||
if err != nil {
|
|
||||||
log.Error("dhcpv4: handling decline: %s", err)
|
|
||||||
|
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
case dhcpv4.MessageTypeRelease:
|
|
||||||
err = s.handleRelease(req, resp)
|
|
||||||
if err != nil {
|
|
||||||
log.Error("dhcpv4: handling release: %s", err)
|
|
||||||
|
|
||||||
return 0
|
if rCode != 1 {
|
||||||
}
|
return rCode
|
||||||
}
|
}
|
||||||
|
|
||||||
if l != nil {
|
if l != nil {
|
||||||
resp.YourIPAddr = net.IP(l.IP.AsSlice())
|
resp.YourIPAddr = l.IP.AsSlice()
|
||||||
}
|
}
|
||||||
|
|
||||||
s.updateOptions(req, resp)
|
s.updateOptions(req, resp)
|
||||||
|
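The hunk above replaces the old switch-based handle with a table of per-message-type handlers, so handle itself only dispatches. A minimal, self-contained sketch of that dispatch pattern (illustrative only, not project code) looks like this:

	package main

	import "fmt"

	type msgType int

	const (
		discover msgType = iota
		request
	)

	// handlers maps a message type to the function that produces a reply,
	// mirroring the messageHandlers map introduced in this change.
	var handlers = map[msgType]func() string{
		discover: func() string { return "offer" },
		request:  func() string { return "ack" },
	}

	// handle only dispatches; unknown message types are dropped.
	func handle(t msgType) string {
		h, ok := handlers[t]
		if !ok {
			return "drop"
		}

		return h()
	}

	func main() {
		fmt.Println(handle(discover), handle(request))
	}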
@@ -1162,23 +1210,8 @@ func (s *v4Server) Start() (err error) {
 		// No available IP addresses which may appear later.
 		return nil
 	}
-	// Update the value of Domain Name Server option separately from others if
-	// not assigned yet since its value is available only at server's start.
-	//
-	// TODO(e.burkov): Initialize as implicit option with the rest of default
-	// options when it will be possible to do before the call to Start.
-	if !s.explicitOpts.Has(dhcpv4.OptionDomainNameServer) {
-		s.implicitOpts.Update(dhcpv4.OptDNS(dnsIPAddrs...))
-	}
-
-	for _, ip := range dnsIPAddrs {
-		ip = ip.To4()
-		if ip == nil {
-			continue
-		}
-
-		s.conf.dnsIPAddrs = append(s.conf.dnsIPAddrs, netip.AddrFrom4(*(*[4]byte)(ip)))
-	}
+	s.configureDNSIPAddrs(dnsIPAddrs)

 	var c net.PacketConn
 	if c, err = s.newDHCPConn(iface); err != nil {

@@ -1199,10 +1232,10 @@ func (s *v4Server) Start() (err error) {
 	log.Info("dhcpv4: listening")

 	go func() {
-		if serr := s.srv.Serve(); errors.Is(serr, net.ErrClosed) {
+		if sErr := s.srv.Serve(); errors.Is(sErr, net.ErrClosed) {
 			log.Info("dhcpv4: server is closed")
-		} else if serr != nil {
-			log.Error("dhcpv4: srv.Serve: %s", serr)
+		} else if sErr != nil {
+			log.Error("dhcpv4: srv.Serve: %s", sErr)
 		}
 	}()

@@ -1213,6 +1246,28 @@ func (s *v4Server) Start() (err error) {
 	return nil
 }

+// configureDNSIPAddrs updates v4Server configuration with provided slice of
+// dns IP addresses.
+func (s *v4Server) configureDNSIPAddrs(dnsIPAddrs []net.IP) {
+	// Update the value of Domain Name Server option separately from others if
+	// not assigned yet since its value is available only at server's start.
+	//
+	// TODO(e.burkov): Initialize as implicit option with the rest of default
+	// options when it will be possible to do before the call to Start.
+	if !s.explicitOpts.Has(dhcpv4.OptionDomainNameServer) {
+		s.implicitOpts.Update(dhcpv4.OptDNS(dnsIPAddrs...))
+	}
+
+	for _, ip := range dnsIPAddrs {
+		vAddr, err := netutil.IPToAddr(ip, netutil.AddrFamilyIPv4)
+		if err != nil {
+			continue
+		}
+
+		s.conf.dnsIPAddrs = append(s.conf.dnsIPAddrs, vAddr)
+	}
+}
+
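configureDNSIPAddrs now converts each interface address from net.IP to netip.Addr via netutil.IPToAddr. The conversion it relies on can be illustrated with the standard library alone; this is a rough standalone sketch assuming plain IPv4 input, not the helper the project actually uses:

	package main

	import (
		"fmt"
		"net"
		"net/netip"
	)

	// toIPv4Addr mirrors the idea behind converting a net.IP into a
	// netip.Addr for the IPv4 family: reject non-IPv4 values and strip
	// the IPv4-in-IPv6 mapping so that comparisons behave as expected.
	func toIPv4Addr(ip net.IP) (addr netip.Addr, err error) {
		ip4 := ip.To4()
		if ip4 == nil {
			return netip.Addr{}, fmt.Errorf("not an IPv4 address: %s", ip)
		}

		addr, ok := netip.AddrFromSlice(ip4)
		if !ok {
			return netip.Addr{}, fmt.Errorf("invalid address: %s", ip)
		}

		return addr.Unmap(), nil
	}

	func main() {
		addr, err := toIPv4Addr(net.ParseIP("192.168.10.1"))
		fmt.Println(addr, err) // 192.168.10.1 <nil>
	}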
 // Stop - stop server
 func (s *v4Server) Stop() (err error) {
 	if s.srv == nil {

@ -227,7 +227,7 @@ func TestV4Server_AddRemove_static(t *testing.T) {
|
||||||
},
|
},
|
||||||
name: "with_gateway_ip",
|
name: "with_gateway_ip",
|
||||||
wantErrMsg: "dhcpv4: adding static lease: " +
|
wantErrMsg: "dhcpv4: adding static lease: " +
|
||||||
"can't assign the gateway IP 192.168.10.1 to the lease",
|
`can't assign the gateway IP "192.168.10.1" to the lease`,
|
||||||
}, {
|
}, {
|
||||||
lease: &Lease{
|
lease: &Lease{
|
||||||
Hostname: "ip6.local",
|
Hostname: "ip6.local",
|
||||||
|
@ -236,7 +236,7 @@ func TestV4Server_AddRemove_static(t *testing.T) {
|
||||||
},
|
},
|
||||||
name: "ipv6",
|
name: "ipv6",
|
||||||
wantErrMsg: `dhcpv4: adding static lease: ` +
|
wantErrMsg: `dhcpv4: adding static lease: ` +
|
||||||
`invalid ip "ffff::1", only ipv4 is supported`,
|
`invalid IP "ffff::1": only IPv4 is supported`,
|
||||||
}, {
|
}, {
|
||||||
lease: &Lease{
|
lease: &Lease{
|
||||||
Hostname: "bad-mac.local",
|
Hostname: "bad-mac.local",
|
||||||
|
|
|
@@ -30,7 +30,7 @@ type v6Server struct {
 	leasesLock sync.Mutex
 	leases     []*Lease
 	ipAddrs    [256]byte
-	sid        dhcpv6.Duid
+	sid        dhcpv6.DUID

 	ra raCtx // RA module

@@ -586,9 +626,31 @@ func (s *v6Server) packetHandler(conn net.PacketConn, peer net.Addr, req dhcpv6.
 	}
 }

-// initialize RA module
-func (s *v6Server) initRA(iface *net.Interface) error {
-	// choose the source IP address - should be link-local-unicast
+// configureDNSIPAddrs updates v6Server configuration with the slice of DNS IP
+// addresses of provided interface iface. Initializes RA module.
+func (s *v6Server) configureDNSIPAddrs(iface *net.Interface) (ok bool, err error) {
+	dnsIPAddrs, err := aghnet.IfaceDNSIPAddrs(
+		iface,
+		aghnet.IPVersion6,
+		defaultMaxAttempts,
+		defaultBackoff,
+	)
+	if err != nil {
+		return false, fmt.Errorf("interface %s: %w", iface.Name, err)
+	}
+
+	if len(dnsIPAddrs) == 0 {
+		return false, nil
+	}
+
+	s.conf.dnsIPAddrs = dnsIPAddrs
+
+	return true, s.initRA(iface)
+}
+
+// initRA initializes RA module.
+func (s *v6Server) initRA(iface *net.Interface) (err error) {
+	// Choose the source IP address - should be link-local-unicast.
 	s.ra.ipAddr = s.conf.dnsIPAddrs[0]
 	for _, ip := range s.conf.dnsIPAddrs {
 		if ip.IsLinkLocalUnicast() {

@@ -604,6 +626,7 @@ func (s *v6Server) initRA(iface *net.Interface) error {
 	s.ra.ifaceName = s.conf.InterfaceName
 	s.ra.iface = iface
 	s.ra.packetSendPeriod = 1 * time.Second

 	return s.ra.Init()
 }

@ -623,63 +646,47 @@ func (s *v6Server) Start() (err error) {
|
||||||
|
|
||||||
log.Debug("dhcpv6: starting...")
|
log.Debug("dhcpv6: starting...")
|
||||||
|
|
||||||
dnsIPAddrs, err := aghnet.IfaceDNSIPAddrs(
|
ok, err := s.configureDNSIPAddrs(iface)
|
||||||
iface,
|
|
||||||
aghnet.IPVersion6,
|
|
||||||
defaultMaxAttempts,
|
|
||||||
defaultBackoff,
|
|
||||||
)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("interface %s: %w", ifaceName, err)
|
// Don't wrap the error, because it's informative enough as is.
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(dnsIPAddrs) == 0 {
|
if !ok {
|
||||||
// No available IP addresses which may appear later.
|
// No available IP addresses which may appear later.
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
s.conf.dnsIPAddrs = dnsIPAddrs
|
// Don't initialize DHCPv6 server if we must force the clients to use SLAAC.
|
||||||
|
|
||||||
err = s.initRA(iface)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// don't initialize DHCPv6 server if we must force the clients to use SLAAC
|
|
||||||
if s.conf.RASLAACOnly {
|
if s.conf.RASLAACOnly {
|
||||||
log.Debug("not starting dhcpv6 server due to ra_slaac_only=true")
|
log.Debug("not starting dhcpv6 server due to ra_slaac_only=true")
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Debug("dhcpv6: listening...")
|
|
||||||
|
|
||||||
err = netutil.ValidateMAC(iface.HardwareAddr)
|
err = netutil.ValidateMAC(iface.HardwareAddr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("validating interface %s: %w", iface.Name, err)
|
return fmt.Errorf("validating interface %s: %w", iface.Name, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
s.sid = dhcpv6.Duid{
|
s.sid = &dhcpv6.DUIDLLT{
|
||||||
Type: dhcpv6.DUID_LLT,
|
HWType: iana.HWTypeEthernet,
|
||||||
HwType: iana.HWTypeEthernet,
|
|
||||||
LinkLayerAddr: iface.HardwareAddr,
|
LinkLayerAddr: iface.HardwareAddr,
|
||||||
Time: dhcpv6.GetTime(),
|
Time: dhcpv6.GetTime(),
|
||||||
}
|
}
|
||||||
|
|
||||||
laddr := &net.UDPAddr{
|
s.srv, err = server6.NewServer(iface.Name, nil, s.packetHandler, server6.WithDebugLogger())
|
||||||
IP: net.ParseIP("::"),
|
|
||||||
Port: dhcpv6.DefaultServerPort,
|
|
||||||
}
|
|
||||||
s.srv, err = server6.NewServer(iface.Name, laddr, s.packetHandler, server6.WithDebugLogger())
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
log.Debug("dhcpv6: listening...")
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
if serr := s.srv.Serve(); errors.Is(serr, net.ErrClosed) {
|
if sErr := s.srv.Serve(); errors.Is(sErr, net.ErrClosed) {
|
||||||
log.Info("dhcpv6: server is closed")
|
log.Info("dhcpv6: server is closed")
|
||||||
} else if serr != nil {
|
} else if sErr != nil {
|
||||||
log.Error("dhcpv6: srv.Serve: %s", serr)
|
log.Error("dhcpv6: srv.Serve: %s", sErr)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
|
|
|
@ -121,9 +121,8 @@ func TestV6GetLease(t *testing.T) {
|
||||||
|
|
||||||
dnsAddr := net.ParseIP("2000::1")
|
dnsAddr := net.ParseIP("2000::1")
|
||||||
s.conf.dnsIPAddrs = []net.IP{dnsAddr}
|
s.conf.dnsIPAddrs = []net.IP{dnsAddr}
|
||||||
s.sid = dhcpv6.Duid{
|
s.sid = &dhcpv6.DUIDLL{
|
||||||
Type: dhcpv6.DUID_LLT,
|
HWType: iana.HWTypeEthernet,
|
||||||
HwType: iana.HWTypeEthernet,
|
|
||||||
LinkLayerAddr: net.HardwareAddr{0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA},
|
LinkLayerAddr: net.HardwareAddr{0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -216,9 +215,8 @@ func TestV6GetDynamicLease(t *testing.T) {
|
||||||
|
|
||||||
dnsAddr := net.ParseIP("2000::1")
|
dnsAddr := net.ParseIP("2000::1")
|
||||||
s.conf.dnsIPAddrs = []net.IP{dnsAddr}
|
s.conf.dnsIPAddrs = []net.IP{dnsAddr}
|
||||||
s.sid = dhcpv6.Duid{
|
s.sid = &dhcpv6.DUIDLL{
|
||||||
Type: dhcpv6.DUID_LLT,
|
HWType: iana.HWTypeEthernet,
|
||||||
HwType: iana.HWTypeEthernet,
|
|
||||||
LinkLayerAddr: net.HardwareAddr{0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA},
|
LinkLayerAddr: net.HardwareAddr{0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -23,6 +23,7 @@ import (
|
||||||
"github.com/AdguardTeam/AdGuardHome/internal/aghtest"
|
"github.com/AdguardTeam/AdGuardHome/internal/aghtest"
|
||||||
"github.com/AdguardTeam/AdGuardHome/internal/dhcpd"
|
"github.com/AdguardTeam/AdGuardHome/internal/dhcpd"
|
||||||
"github.com/AdguardTeam/AdGuardHome/internal/filtering"
|
"github.com/AdguardTeam/AdGuardHome/internal/filtering"
|
||||||
|
"github.com/AdguardTeam/AdGuardHome/internal/filtering/hashprefix"
|
||||||
"github.com/AdguardTeam/AdGuardHome/internal/filtering/safesearch"
|
"github.com/AdguardTeam/AdGuardHome/internal/filtering/safesearch"
|
||||||
"github.com/AdguardTeam/dnsproxy/proxy"
|
"github.com/AdguardTeam/dnsproxy/proxy"
|
||||||
"github.com/AdguardTeam/dnsproxy/upstream"
|
"github.com/AdguardTeam/dnsproxy/upstream"
|
||||||
|
@ -915,13 +916,23 @@ func TestBlockedByHosts(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestBlockedBySafeBrowsing(t *testing.T) {
|
func TestBlockedBySafeBrowsing(t *testing.T) {
|
||||||
const hostname = "wmconvirus.narod.ru"
|
const (
|
||||||
|
hostname = "wmconvirus.narod.ru"
|
||||||
|
cacheTime = 10 * time.Minute
|
||||||
|
cacheSize = 10000
|
||||||
|
)
|
||||||
|
|
||||||
|
sbChecker := hashprefix.New(&hashprefix.Config{
|
||||||
|
CacheTime: cacheTime,
|
||||||
|
CacheSize: cacheSize,
|
||||||
|
Upstream: aghtest.NewBlockUpstream(hostname, true),
|
||||||
|
})
|
||||||
|
|
||||||
sbUps := aghtest.NewBlockUpstream(hostname, true)
|
|
||||||
ans4, _ := (&aghtest.TestResolver{}).HostToIPs(hostname)
|
ans4, _ := (&aghtest.TestResolver{}).HostToIPs(hostname)
|
||||||
|
|
||||||
filterConf := &filtering.Config{
|
filterConf := &filtering.Config{
|
||||||
SafeBrowsingEnabled: true,
|
SafeBrowsingEnabled: true,
|
||||||
|
SafeBrowsingChecker: sbChecker,
|
||||||
}
|
}
|
||||||
forwardConf := ServerConfig{
|
forwardConf := ServerConfig{
|
||||||
UDPListenAddrs: []*net.UDPAddr{{}},
|
UDPListenAddrs: []*net.UDPAddr{{}},
|
||||||
|
@ -935,7 +946,6 @@ func TestBlockedBySafeBrowsing(t *testing.T) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
s := createTestServer(t, filterConf, forwardConf, nil)
|
s := createTestServer(t, filterConf, forwardConf, nil)
|
||||||
s.dnsFilter.SetSafeBrowsingUpstream(sbUps)
|
|
||||||
startDeferStop(t, s)
|
startDeferStop(t, s)
|
||||||
addr := s.dnsProxy.Addr(proxy.ProtoUDP)
|
addr := s.dnsProxy.Addr(proxy.ProtoUDP)
|
||||||
|
|
||||||
|
|
|
@ -205,8 +205,8 @@ func TestDNSForwardHTTP_handleSetConfig(t *testing.T) {
|
||||||
wantSet: `validating upstream servers: validating upstream "!!!": not an ip:port`,
|
wantSet: `validating upstream servers: validating upstream "!!!": not an ip:port`,
|
||||||
}, {
|
}, {
|
||||||
name: "bootstraps_bad",
|
name: "bootstraps_bad",
|
||||||
wantSet: `checking bootstrap a: invalid address: ` +
|
wantSet: `checking bootstrap a: invalid address: bootstrap a:53: ` +
|
||||||
`Resolver a is not eligible to be a bootstrap DNS server`,
|
`ParseAddr("a"): unable to parse IP`,
|
||||||
}, {
|
}, {
|
||||||
name: "cache_bad_ttl",
|
name: "cache_bad_ttl",
|
||||||
wantSet: `cache_ttl_min must be less or equal than cache_ttl_max`,
|
wantSet: `cache_ttl_min must be less or equal than cache_ttl_max`,
|
||||||
|
@ -487,7 +487,8 @@ func TestServer_handleTestUpstreaDNS(t *testing.T) {
|
||||||
},
|
},
|
||||||
wantResp: map[string]any{
|
wantResp: map[string]any{
|
||||||
badUps: `upstream "` + badUps + `" fails to exchange: ` +
|
badUps: `upstream "` + badUps + `" fails to exchange: ` +
|
||||||
`couldn't communicate with upstream: dns: id mismatch`,
|
`couldn't communicate with upstream: exchanging with ` +
|
||||||
|
badUps + ` over tcp: dns: id mismatch`,
|
||||||
},
|
},
|
||||||
name: "broken",
|
name: "broken",
|
||||||
}, {
|
}, {
|
||||||
|
@ -497,7 +498,8 @@ func TestServer_handleTestUpstreaDNS(t *testing.T) {
|
||||||
wantResp: map[string]any{
|
wantResp: map[string]any{
|
||||||
goodUps: "OK",
|
goodUps: "OK",
|
||||||
badUps: `upstream "` + badUps + `" fails to exchange: ` +
|
badUps: `upstream "` + badUps + `" fails to exchange: ` +
|
||||||
`couldn't communicate with upstream: dns: id mismatch`,
|
`couldn't communicate with upstream: exchanging with ` +
|
||||||
|
badUps + ` over tcp: dns: id mismatch`,
|
||||||
},
|
},
|
||||||
name: "both",
|
name: "both",
|
||||||
}}
|
}}
|
||||||
|
|
|
@ -18,8 +18,6 @@ import (
|
||||||
|
|
||||||
"github.com/AdguardTeam/AdGuardHome/internal/aghhttp"
|
"github.com/AdguardTeam/AdGuardHome/internal/aghhttp"
|
||||||
"github.com/AdguardTeam/AdGuardHome/internal/aghnet"
|
"github.com/AdguardTeam/AdGuardHome/internal/aghnet"
|
||||||
"github.com/AdguardTeam/dnsproxy/upstream"
|
|
||||||
"github.com/AdguardTeam/golibs/cache"
|
|
||||||
"github.com/AdguardTeam/golibs/errors"
|
"github.com/AdguardTeam/golibs/errors"
|
||||||
"github.com/AdguardTeam/golibs/log"
|
"github.com/AdguardTeam/golibs/log"
|
||||||
"github.com/AdguardTeam/golibs/mathutil"
|
"github.com/AdguardTeam/golibs/mathutil"
|
||||||
|
@ -75,6 +73,12 @@ type Resolver interface {
|
||||||
|
|
||||||
 // Config allows you to configure DNS filtering with New() or just change variables directly.
 type Config struct {
+	// SafeBrowsingChecker is the safe browsing hash-prefix checker.
+	SafeBrowsingChecker Checker `yaml:"-"`
+
+	// ParentalControlChecker is the parental control hash-prefix checker.
+	ParentalControlChecker Checker `yaml:"-"`
+
 	// enabled is used to be returned within Settings.
 	//
 	// It is of type uint32 to be accessed by atomic.

@@ -158,8 +162,22 @@ type hostChecker struct {
 	name string
 }

+// Checker is used for safe browsing or parental control hash-prefix filtering.
+type Checker interface {
+	// Check returns true if request for the host should be blocked.
+	Check(host string) (block bool, err error)
+}
+
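Any type with this single Check method satisfies the new interface. A tiny illustrative implementation (hypothetical, mainly useful in tests; the real implementations live in the new hashprefix package) could look like this:

	// staticChecker is a hypothetical Checker implementation that
	// "blocks" a fixed set of hostnames.
	type staticChecker map[string]struct{}

	// Check implements the Checker interface for staticChecker.
	func (c staticChecker) Check(host string) (block bool, err error) {
		_, block = c[host]

		return block, nil
	}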
// DNSFilter matches hostnames and DNS requests against filtering rules.
|
// DNSFilter matches hostnames and DNS requests against filtering rules.
|
||||||
type DNSFilter struct {
|
type DNSFilter struct {
|
||||||
|
safeSearch SafeSearch
|
||||||
|
|
||||||
|
// safeBrowsingChecker is the safe browsing hash-prefix checker.
|
||||||
|
safeBrowsingChecker Checker
|
||||||
|
|
||||||
|
// parentalControl is the parental control hash-prefix checker.
|
||||||
|
parentalControlChecker Checker
|
||||||
|
|
||||||
rulesStorage *filterlist.RuleStorage
|
rulesStorage *filterlist.RuleStorage
|
||||||
filteringEngine *urlfilter.DNSEngine
|
filteringEngine *urlfilter.DNSEngine
|
||||||
|
|
||||||
|
@ -168,14 +186,6 @@ type DNSFilter struct {
|
||||||
|
|
||||||
engineLock sync.RWMutex
|
engineLock sync.RWMutex
|
||||||
|
|
||||||
parentalServer string // access via methods
|
|
||||||
safeBrowsingServer string // access via methods
|
|
||||||
parentalUpstream upstream.Upstream
|
|
||||||
safeBrowsingUpstream upstream.Upstream
|
|
||||||
|
|
||||||
safebrowsingCache cache.Cache
|
|
||||||
parentalCache cache.Cache
|
|
||||||
|
|
||||||
Config // for direct access by library users, even a = assignment
|
Config // for direct access by library users, even a = assignment
|
||||||
// confLock protects Config.
|
// confLock protects Config.
|
||||||
confLock sync.RWMutex
|
confLock sync.RWMutex
|
||||||
|
@ -192,7 +202,6 @@ type DNSFilter struct {
|
||||||
// TODO(e.burkov): Don't use regexp for such a simple text processing task.
|
// TODO(e.burkov): Don't use regexp for such a simple text processing task.
|
||||||
filterTitleRegexp *regexp.Regexp
|
filterTitleRegexp *regexp.Regexp
|
||||||
|
|
||||||
safeSearch SafeSearch
|
|
||||||
hostCheckers []hostChecker
|
hostCheckers []hostChecker
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -940,19 +949,12 @@ func InitModule() {
|
||||||
// be non-nil.
|
// be non-nil.
|
||||||
func New(c *Config, blockFilters []Filter) (d *DNSFilter, err error) {
|
func New(c *Config, blockFilters []Filter) (d *DNSFilter, err error) {
|
||||||
d = &DNSFilter{
|
d = &DNSFilter{
|
||||||
refreshLock: &sync.Mutex{},
|
refreshLock: &sync.Mutex{},
|
||||||
filterTitleRegexp: regexp.MustCompile(`^! Title: +(.*)$`),
|
filterTitleRegexp: regexp.MustCompile(`^! Title: +(.*)$`),
|
||||||
|
safeBrowsingChecker: c.SafeBrowsingChecker,
|
||||||
|
parentalControlChecker: c.ParentalControlChecker,
|
||||||
}
|
}
|
||||||
|
|
||||||
d.safebrowsingCache = cache.New(cache.Config{
|
|
||||||
EnableLRU: true,
|
|
||||||
MaxSize: c.SafeBrowsingCacheSize,
|
|
||||||
})
|
|
||||||
d.parentalCache = cache.New(cache.Config{
|
|
||||||
EnableLRU: true,
|
|
||||||
MaxSize: c.ParentalCacheSize,
|
|
||||||
})
|
|
||||||
|
|
||||||
d.safeSearch = c.SafeSearch
|
d.safeSearch = c.SafeSearch
|
||||||
|
|
||||||
d.hostCheckers = []hostChecker{{
|
d.hostCheckers = []hostChecker{{
|
||||||
|
@ -977,11 +979,6 @@ func New(c *Config, blockFilters []Filter) (d *DNSFilter, err error) {
|
||||||
|
|
||||||
defer func() { err = errors.Annotate(err, "filtering: %w") }()
|
defer func() { err = errors.Annotate(err, "filtering: %w") }()
|
||||||
|
|
||||||
err = d.initSecurityServices()
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("initializing services: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
d.Config = *c
|
d.Config = *c
|
||||||
d.filtersMu = &sync.RWMutex{}
|
d.filtersMu = &sync.RWMutex{}
|
||||||
|
|
||||||
|
@ -1038,3 +1035,69 @@ func (d *DNSFilter) Start() {
|
||||||
// So for now we just start this periodic task from here.
|
// So for now we just start this periodic task from here.
|
||||||
go d.periodicallyRefreshFilters()
|
go d.periodicallyRefreshFilters()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Safe browsing and parental control methods.
|
||||||
|
|
||||||
|
// TODO(a.garipov): Unify with checkParental.
|
||||||
|
func (d *DNSFilter) checkSafeBrowsing(
|
||||||
|
host string,
|
||||||
|
_ uint16,
|
||||||
|
setts *Settings,
|
||||||
|
) (res Result, err error) {
|
||||||
|
if !setts.ProtectionEnabled || !setts.SafeBrowsingEnabled {
|
||||||
|
return Result{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if log.GetLevel() >= log.DEBUG {
|
||||||
|
timer := log.StartTimer()
|
||||||
|
defer timer.LogElapsed("safebrowsing lookup for %q", host)
|
||||||
|
}
|
||||||
|
|
||||||
|
res = Result{
|
||||||
|
Rules: []*ResultRule{{
|
||||||
|
Text: "adguard-malware-shavar",
|
||||||
|
FilterListID: SafeBrowsingListID,
|
||||||
|
}},
|
||||||
|
Reason: FilteredSafeBrowsing,
|
||||||
|
IsFiltered: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
block, err := d.safeBrowsingChecker.Check(host)
|
||||||
|
if !block || err != nil {
|
||||||
|
return Result{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO(a.garipov): Unify with checkSafeBrowsing.
|
||||||
|
func (d *DNSFilter) checkParental(
|
||||||
|
host string,
|
||||||
|
_ uint16,
|
||||||
|
setts *Settings,
|
||||||
|
) (res Result, err error) {
|
||||||
|
if !setts.ProtectionEnabled || !setts.ParentalEnabled {
|
||||||
|
return Result{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if log.GetLevel() >= log.DEBUG {
|
||||||
|
timer := log.StartTimer()
|
||||||
|
defer timer.LogElapsed("parental lookup for %q", host)
|
||||||
|
}
|
||||||
|
|
||||||
|
res = Result{
|
||||||
|
Rules: []*ResultRule{{
|
||||||
|
Text: "parental CATEGORY_BLACKLISTED",
|
||||||
|
FilterListID: ParentalListID,
|
||||||
|
}},
|
||||||
|
Reason: FilteredParental,
|
||||||
|
IsFiltered: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
block, err := d.parentalControlChecker.Check(host)
|
||||||
|
if !block || err != nil {
|
||||||
|
return Result{}, err
|
||||||
|
}
|
||||||
|
|
||||||
|
return res, nil
|
||||||
|
}
|
||||||
|
|
|
@ -7,7 +7,7 @@ import (
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/AdguardTeam/AdGuardHome/internal/aghtest"
|
"github.com/AdguardTeam/AdGuardHome/internal/aghtest"
|
||||||
"github.com/AdguardTeam/golibs/cache"
|
"github.com/AdguardTeam/AdGuardHome/internal/filtering/hashprefix"
|
||||||
"github.com/AdguardTeam/golibs/log"
|
"github.com/AdguardTeam/golibs/log"
|
||||||
"github.com/AdguardTeam/golibs/testutil"
|
"github.com/AdguardTeam/golibs/testutil"
|
||||||
"github.com/AdguardTeam/urlfilter/rules"
|
"github.com/AdguardTeam/urlfilter/rules"
|
||||||
|
@ -27,17 +27,6 @@ const (
|
||||||
|
|
||||||
// Helpers.
|
// Helpers.
|
||||||
|
|
||||||
func purgeCaches(d *DNSFilter) {
|
|
||||||
for _, c := range []cache.Cache{
|
|
||||||
d.safebrowsingCache,
|
|
||||||
d.parentalCache,
|
|
||||||
} {
|
|
||||||
if c != nil {
|
|
||||||
c.Clear()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func newForTest(t testing.TB, c *Config, filters []Filter) (f *DNSFilter, setts *Settings) {
|
func newForTest(t testing.TB, c *Config, filters []Filter) (f *DNSFilter, setts *Settings) {
|
||||||
setts = &Settings{
|
setts = &Settings{
|
||||||
ProtectionEnabled: true,
|
ProtectionEnabled: true,
|
||||||
|
@ -58,11 +47,17 @@ func newForTest(t testing.TB, c *Config, filters []Filter) (f *DNSFilter, setts
|
||||||
f, err := New(c, filters)
|
f, err := New(c, filters)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
purgeCaches(f)
|
|
||||||
|
|
||||||
return f, setts
|
return f, setts
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func newChecker(host string) Checker {
|
||||||
|
return hashprefix.New(&hashprefix.Config{
|
||||||
|
CacheTime: 10,
|
||||||
|
CacheSize: 100000,
|
||||||
|
Upstream: aghtest.NewBlockUpstream(host, true),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
func (d *DNSFilter) checkMatch(t *testing.T, hostname string, setts *Settings) {
|
func (d *DNSFilter) checkMatch(t *testing.T, hostname string, setts *Settings) {
|
||||||
t.Helper()
|
t.Helper()
|
||||||
|
|
||||||
|
@ -175,10 +170,14 @@ func TestSafeBrowsing(t *testing.T) {
|
||||||
aghtest.ReplaceLogWriter(t, logOutput)
|
aghtest.ReplaceLogWriter(t, logOutput)
|
||||||
aghtest.ReplaceLogLevel(t, log.DEBUG)
|
aghtest.ReplaceLogLevel(t, log.DEBUG)
|
||||||
|
|
||||||
d, setts := newForTest(t, &Config{SafeBrowsingEnabled: true}, nil)
|
sbChecker := newChecker(sbBlocked)
|
||||||
|
|
||||||
|
d, setts := newForTest(t, &Config{
|
||||||
|
SafeBrowsingEnabled: true,
|
||||||
|
SafeBrowsingChecker: sbChecker,
|
||||||
|
}, nil)
|
||||||
t.Cleanup(d.Close)
|
t.Cleanup(d.Close)
|
||||||
|
|
||||||
d.SetSafeBrowsingUpstream(aghtest.NewBlockUpstream(sbBlocked, true))
|
|
||||||
d.checkMatch(t, sbBlocked, setts)
|
d.checkMatch(t, sbBlocked, setts)
|
||||||
|
|
||||||
require.Contains(t, logOutput.String(), fmt.Sprintf("safebrowsing lookup for %q", sbBlocked))
|
require.Contains(t, logOutput.String(), fmt.Sprintf("safebrowsing lookup for %q", sbBlocked))
|
||||||
|
@ -188,18 +187,17 @@ func TestSafeBrowsing(t *testing.T) {
|
||||||
d.checkMatchEmpty(t, pcBlocked, setts)
|
d.checkMatchEmpty(t, pcBlocked, setts)
|
||||||
|
|
||||||
// Cached result.
|
// Cached result.
|
||||||
d.safeBrowsingServer = "127.0.0.1"
|
|
||||||
d.checkMatch(t, sbBlocked, setts)
|
d.checkMatch(t, sbBlocked, setts)
|
||||||
d.checkMatchEmpty(t, pcBlocked, setts)
|
d.checkMatchEmpty(t, pcBlocked, setts)
|
||||||
d.safeBrowsingServer = defaultSafebrowsingServer
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestParallelSB(t *testing.T) {
|
func TestParallelSB(t *testing.T) {
|
||||||
d, setts := newForTest(t, &Config{SafeBrowsingEnabled: true}, nil)
|
d, setts := newForTest(t, &Config{
|
||||||
|
SafeBrowsingEnabled: true,
|
||||||
|
SafeBrowsingChecker: newChecker(sbBlocked),
|
||||||
|
}, nil)
|
||||||
t.Cleanup(d.Close)
|
t.Cleanup(d.Close)
|
||||||
|
|
||||||
d.SetSafeBrowsingUpstream(aghtest.NewBlockUpstream(sbBlocked, true))
|
|
||||||
|
|
||||||
t.Run("group", func(t *testing.T) {
|
t.Run("group", func(t *testing.T) {
|
||||||
for i := 0; i < 100; i++ {
|
for i := 0; i < 100; i++ {
|
||||||
t.Run(fmt.Sprintf("aaa%d", i), func(t *testing.T) {
|
t.Run(fmt.Sprintf("aaa%d", i), func(t *testing.T) {
|
||||||
|
@ -220,10 +218,12 @@ func TestParentalControl(t *testing.T) {
|
||||||
aghtest.ReplaceLogWriter(t, logOutput)
|
aghtest.ReplaceLogWriter(t, logOutput)
|
||||||
aghtest.ReplaceLogLevel(t, log.DEBUG)
|
aghtest.ReplaceLogLevel(t, log.DEBUG)
|
||||||
|
|
||||||
d, setts := newForTest(t, &Config{ParentalEnabled: true}, nil)
|
d, setts := newForTest(t, &Config{
|
||||||
|
ParentalEnabled: true,
|
||||||
|
ParentalControlChecker: newChecker(pcBlocked),
|
||||||
|
}, nil)
|
||||||
t.Cleanup(d.Close)
|
t.Cleanup(d.Close)
|
||||||
|
|
||||||
d.SetParentalUpstream(aghtest.NewBlockUpstream(pcBlocked, true))
|
|
||||||
d.checkMatch(t, pcBlocked, setts)
|
d.checkMatch(t, pcBlocked, setts)
|
||||||
require.Contains(t, logOutput.String(), fmt.Sprintf("parental lookup for %q", pcBlocked))
|
require.Contains(t, logOutput.String(), fmt.Sprintf("parental lookup for %q", pcBlocked))
|
||||||
|
|
||||||
|
@ -233,7 +233,6 @@ func TestParentalControl(t *testing.T) {
|
||||||
d.checkMatchEmpty(t, "api.jquery.com", setts)
|
d.checkMatchEmpty(t, "api.jquery.com", setts)
|
||||||
|
|
||||||
// Test cached result.
|
// Test cached result.
|
||||||
d.parentalServer = "127.0.0.1"
|
|
||||||
d.checkMatch(t, pcBlocked, setts)
|
d.checkMatch(t, pcBlocked, setts)
|
||||||
d.checkMatchEmpty(t, "yandex.ru", setts)
|
d.checkMatchEmpty(t, "yandex.ru", setts)
|
||||||
}
|
}
|
||||||
|
@ -593,8 +592,10 @@ func applyClientSettings(setts *Settings) {
|
||||||
func TestClientSettings(t *testing.T) {
|
func TestClientSettings(t *testing.T) {
|
||||||
d, setts := newForTest(t,
|
d, setts := newForTest(t,
|
||||||
&Config{
|
&Config{
|
||||||
ParentalEnabled: true,
|
ParentalEnabled: true,
|
||||||
SafeBrowsingEnabled: false,
|
SafeBrowsingEnabled: false,
|
||||||
|
SafeBrowsingChecker: newChecker(sbBlocked),
|
||||||
|
ParentalControlChecker: newChecker(pcBlocked),
|
||||||
},
|
},
|
||||||
[]Filter{{
|
[]Filter{{
|
||||||
ID: 0, Data: []byte("||example.org^\n"),
|
ID: 0, Data: []byte("||example.org^\n"),
|
||||||
|
@ -602,9 +603,6 @@ func TestClientSettings(t *testing.T) {
|
||||||
)
|
)
|
||||||
t.Cleanup(d.Close)
|
t.Cleanup(d.Close)
|
||||||
|
|
||||||
d.SetParentalUpstream(aghtest.NewBlockUpstream(pcBlocked, true))
|
|
||||||
d.SetSafeBrowsingUpstream(aghtest.NewBlockUpstream(sbBlocked, true))
|
|
||||||
|
|
||||||
type testCase struct {
|
type testCase struct {
|
||||||
name string
|
name string
|
||||||
host string
|
host string
|
||||||
|
@ -665,11 +663,12 @@ func TestClientSettings(t *testing.T) {
|
||||||
// Benchmarks.
|
// Benchmarks.
|
||||||
|
|
||||||
func BenchmarkSafeBrowsing(b *testing.B) {
|
func BenchmarkSafeBrowsing(b *testing.B) {
|
||||||
d, setts := newForTest(b, &Config{SafeBrowsingEnabled: true}, nil)
|
d, setts := newForTest(b, &Config{
|
||||||
|
SafeBrowsingEnabled: true,
|
||||||
|
SafeBrowsingChecker: newChecker(sbBlocked),
|
||||||
|
}, nil)
|
||||||
b.Cleanup(d.Close)
|
b.Cleanup(d.Close)
|
||||||
|
|
||||||
d.SetSafeBrowsingUpstream(aghtest.NewBlockUpstream(sbBlocked, true))
|
|
||||||
|
|
||||||
for n := 0; n < b.N; n++ {
|
for n := 0; n < b.N; n++ {
|
||||||
res, err := d.CheckHost(sbBlocked, dns.TypeA, setts)
|
res, err := d.CheckHost(sbBlocked, dns.TypeA, setts)
|
||||||
require.NoError(b, err)
|
require.NoError(b, err)
|
||||||
|
@ -679,11 +678,12 @@ func BenchmarkSafeBrowsing(b *testing.B) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkSafeBrowsingParallel(b *testing.B) {
|
func BenchmarkSafeBrowsingParallel(b *testing.B) {
|
||||||
d, setts := newForTest(b, &Config{SafeBrowsingEnabled: true}, nil)
|
d, setts := newForTest(b, &Config{
|
||||||
|
SafeBrowsingEnabled: true,
|
||||||
|
SafeBrowsingChecker: newChecker(sbBlocked),
|
||||||
|
}, nil)
|
||||||
b.Cleanup(d.Close)
|
b.Cleanup(d.Close)
|
||||||
|
|
||||||
d.SetSafeBrowsingUpstream(aghtest.NewBlockUpstream(sbBlocked, true))
|
|
||||||
|
|
||||||
b.RunParallel(func(pb *testing.PB) {
|
b.RunParallel(func(pb *testing.PB) {
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
res, err := d.CheckHost(sbBlocked, dns.TypeA, setts)
|
res, err := d.CheckHost(sbBlocked, dns.TypeA, setts)
|
||||||
|
|
internal/filtering/hashprefix/cache.go (new file, 130 lines)
|
@ -0,0 +1,130 @@
|
||||||
|
package hashprefix
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/binary"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/AdguardTeam/golibs/log"
|
||||||
|
)
|
||||||
|
|
||||||
|
// expirySize is the size of expiry in cacheItem.
|
||||||
|
const expirySize = 8
|
||||||
|
|
||||||
|
// cacheItem represents an item that we will store in the cache.
|
||||||
|
type cacheItem struct {
|
||||||
|
// expiry is the time when cacheItem will expire.
|
||||||
|
expiry time.Time
|
||||||
|
|
||||||
|
// hashes is the hashed hostnames.
|
||||||
|
hashes []hostnameHash
|
||||||
|
}
|
||||||
|
|
||||||
|
// toCacheItem decodes cacheItem from data. data must be at least equal to
|
||||||
|
// expiry size.
|
||||||
|
func toCacheItem(data []byte) *cacheItem {
|
||||||
|
t := time.Unix(int64(binary.BigEndian.Uint64(data)), 0)
|
||||||
|
|
||||||
|
data = data[expirySize:]
|
||||||
|
hashes := make([]hostnameHash, len(data)/hashSize)
|
||||||
|
|
||||||
|
for i := 0; i < len(data); i += hashSize {
|
||||||
|
var hash hostnameHash
|
||||||
|
copy(hash[:], data[i:i+hashSize])
|
||||||
|
hashes = append(hashes, hash)
|
||||||
|
}
|
||||||
|
|
||||||
|
return &cacheItem{
|
||||||
|
expiry: t,
|
||||||
|
hashes: hashes,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// fromCacheItem encodes cacheItem into data.
|
||||||
|
func fromCacheItem(item *cacheItem) (data []byte) {
|
||||||
|
data = make([]byte, len(item.hashes)*hashSize+expirySize)
|
||||||
|
expiry := item.expiry.Unix()
|
||||||
|
binary.BigEndian.PutUint64(data[:expirySize], uint64(expiry))
|
||||||
|
|
||||||
|
for _, v := range item.hashes {
|
||||||
|
// nolint:looppointer // The subsilce is used for a copy.
|
||||||
|
data = append(data, v[:]...)
|
||||||
|
}
|
||||||
|
|
||||||
|
return data
|
||||||
|
}
|
||||||
|
|
||||||
|
// findInCache finds hashes in the cache. If nothing found returns list of
|
||||||
|
// hashes, prefixes of which will be sent to upstream.
|
||||||
|
func (c *Checker) findInCache(
|
||||||
|
hashes []hostnameHash,
|
||||||
|
) (found, blocked bool, hashesToRequest []hostnameHash) {
|
||||||
|
now := time.Now()
|
||||||
|
|
||||||
|
i := 0
|
||||||
|
for _, hash := range hashes {
|
||||||
|
// nolint:looppointer // The subsilce is used for a safe cache lookup.
|
||||||
|
data := c.cache.Get(hash[:prefixLen])
|
||||||
|
if data == nil {
|
||||||
|
hashes[i] = hash
|
||||||
|
i++
|
||||||
|
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
item := toCacheItem(data)
|
||||||
|
if now.After(item.expiry) {
|
||||||
|
hashes[i] = hash
|
||||||
|
i++
|
||||||
|
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if ok := findMatch(hashes, item.hashes); ok {
|
||||||
|
return true, true, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if i == 0 {
|
||||||
|
return true, false, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return false, false, hashes[:i]
|
||||||
|
}
|
||||||
|
|
||||||
|
// storeInCache caches hashes.
|
||||||
|
func (c *Checker) storeInCache(hashesToRequest, respHashes []hostnameHash) {
|
||||||
|
hashToStore := make(map[prefix][]hostnameHash)
|
||||||
|
|
||||||
|
for _, hash := range respHashes {
|
||||||
|
var pref prefix
|
||||||
|
// nolint:looppointer // The subsilce is used for a copy.
|
||||||
|
copy(pref[:], hash[:])
|
||||||
|
|
||||||
|
hashToStore[pref] = append(hashToStore[pref], hash)
|
||||||
|
}
|
||||||
|
|
||||||
|
for pref, hash := range hashToStore {
|
||||||
|
// nolint:looppointer // The subsilce is used for a safe cache lookup.
|
||||||
|
c.setCache(pref[:], hash)
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, hash := range hashesToRequest {
|
||||||
|
// nolint:looppointer // The subsilce is used for a safe cache lookup.
|
||||||
|
pref := hash[:prefixLen]
|
||||||
|
val := c.cache.Get(pref)
|
||||||
|
if val == nil {
|
||||||
|
c.setCache(pref, nil)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// setCache stores hash in cache.
|
||||||
|
func (c *Checker) setCache(pref []byte, hashes []hostnameHash) {
|
||||||
|
item := &cacheItem{
|
||||||
|
expiry: time.Now().Add(c.cacheTime),
|
||||||
|
hashes: hashes,
|
||||||
|
}
|
||||||
|
|
||||||
|
c.cache.Set(pref, fromCacheItem(item))
|
||||||
|
log.Debug("%s: stored in cache: %v", c.svc, pref)
|
||||||
|
}
|
internal/filtering/hashprefix/hashprefix.go (new file, 245 lines)
|
@ -0,0 +1,245 @@
|
||||||
|
// Package hashprefix is used for safe browsing and parental control.
|
||||||
|
package hashprefix
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/AdguardTeam/dnsproxy/upstream"
|
||||||
|
"github.com/AdguardTeam/golibs/cache"
|
||||||
|
"github.com/AdguardTeam/golibs/log"
|
||||||
|
"github.com/AdguardTeam/golibs/netutil"
|
||||||
|
"github.com/AdguardTeam/golibs/stringutil"
|
||||||
|
"github.com/miekg/dns"
|
||||||
|
"golang.org/x/exp/slices"
|
||||||
|
"golang.org/x/net/publicsuffix"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// prefixLen is the length of the hash prefix of the filtered hostname.
|
||||||
|
prefixLen = 2
|
||||||
|
|
||||||
|
// hashSize is the size of hashed hostname.
|
||||||
|
hashSize = sha256.Size
|
||||||
|
|
||||||
|
// hexSize is the size of hexadecimal representation of hashed hostname.
|
||||||
|
hexSize = hashSize * 2
|
||||||
|
)
|
||||||
|
|
||||||
|
// prefix is the type of the SHA256 hash prefix used to match against the
|
||||||
|
// domain-name database.
|
||||||
|
type prefix [prefixLen]byte
|
||||||
|
|
||||||
|
// hostnameHash is the hashed hostname.
|
||||||
|
//
|
||||||
|
// TODO(s.chzhen): Split into prefix and suffix.
|
||||||
|
type hostnameHash [hashSize]byte
|
||||||
|
|
||||||
|
// findMatch returns true if any hash from a is also present in b.
|
||||||
|
func findMatch(a, b []hostnameHash) (matched bool) {
|
||||||
|
for _, hash := range a {
|
||||||
|
if slices.Contains(b, hash) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Config is the configuration structure for safe browsing and parental
|
||||||
|
// control.
|
||||||
|
type Config struct {
|
||||||
|
// Upstream is the upstream DNS server.
|
||||||
|
Upstream upstream.Upstream
|
||||||
|
|
||||||
|
// ServiceName is the name of the service.
|
||||||
|
ServiceName string
|
||||||
|
|
||||||
|
// TXTSuffix is the TXT suffix for DNS request.
|
||||||
|
TXTSuffix string
|
||||||
|
|
||||||
|
// CacheTime is the time period to store hash.
|
||||||
|
CacheTime time.Duration
|
||||||
|
|
||||||
|
// CacheSize is the maximum size of the cache. If it's zero, cache size is
|
||||||
|
// unlimited.
|
||||||
|
CacheSize uint
|
||||||
|
}
|
||||||
|
|
||||||
|
type Checker struct {
|
||||||
|
// upstream is the upstream DNS server.
|
||||||
|
upstream upstream.Upstream
|
||||||
|
|
||||||
|
// cache stores hostname hashes.
|
||||||
|
cache cache.Cache
|
||||||
|
|
||||||
|
// svc is the name of the service.
|
||||||
|
svc string
|
||||||
|
|
||||||
|
// txtSuffix is the TXT suffix for DNS request.
|
||||||
|
txtSuffix string
|
||||||
|
|
||||||
|
// cacheTime is the time period to store hash.
|
||||||
|
cacheTime time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
// New returns Checker.
|
||||||
|
func New(conf *Config) (c *Checker) {
|
||||||
|
return &Checker{
|
||||||
|
upstream: conf.Upstream,
|
||||||
|
cache: cache.New(cache.Config{
|
||||||
|
EnableLRU: true,
|
||||||
|
MaxSize: conf.CacheSize,
|
||||||
|
}),
|
||||||
|
svc: conf.ServiceName,
|
||||||
|
txtSuffix: conf.TXTSuffix,
|
||||||
|
cacheTime: conf.CacheTime,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check returns true if request for the host should be blocked.
|
||||||
|
func (c *Checker) Check(host string) (ok bool, err error) {
|
||||||
|
hashes := hostnameToHashes(host)
|
||||||
|
|
||||||
|
found, blocked, hashesToRequest := c.findInCache(hashes)
|
||||||
|
if found {
|
||||||
|
log.Debug("%s: found %q in cache, blocked: %t", c.svc, host, blocked)
|
||||||
|
|
||||||
|
return blocked, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
question := c.getQuestion(hashesToRequest)
|
||||||
|
|
||||||
|
log.Debug("%s: checking %s: %s", c.svc, host, question)
|
||||||
|
req := (&dns.Msg{}).SetQuestion(question, dns.TypeTXT)
|
||||||
|
|
||||||
|
resp, err := c.upstream.Exchange(req)
|
||||||
|
if err != nil {
|
||||||
|
return false, fmt.Errorf("getting hashes: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
matched, receivedHashes := c.processAnswer(hashesToRequest, resp, host)
|
||||||
|
|
||||||
|
c.storeInCache(hashesToRequest, receivedHashes)
|
||||||
|
|
||||||
|
return matched, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// hostnameToHashes returns hashes that should be checked by the hash prefix
|
||||||
|
// filter.
|
||||||
|
func hostnameToHashes(host string) (hashes []hostnameHash) {
|
||||||
|
// subDomainNum defines how many labels should be hashed to match against a
|
||||||
|
// hash prefix filter.
|
||||||
|
const subDomainNum = 4
|
||||||
|
|
||||||
|
pubSuf, icann := publicsuffix.PublicSuffix(host)
|
||||||
|
if !icann {
|
||||||
|
// Check the full private domain space.
|
||||||
|
pubSuf = ""
|
||||||
|
}
|
||||||
|
|
||||||
|
nDots := 0
|
||||||
|
i := strings.LastIndexFunc(host, func(r rune) (ok bool) {
|
||||||
|
if r == '.' {
|
||||||
|
nDots++
|
||||||
|
}
|
||||||
|
|
||||||
|
return nDots == subDomainNum
|
||||||
|
})
|
||||||
|
if i != -1 {
|
||||||
|
host = host[i+1:]
|
||||||
|
}
|
||||||
|
|
||||||
|
sub := netutil.Subdomains(host)
|
||||||
|
|
||||||
|
for _, s := range sub {
|
||||||
|
if s == pubSuf {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
sum := sha256.Sum256([]byte(s))
|
||||||
|
hashes = append(hashes, sum)
|
||||||
|
}
|
||||||
|
|
||||||
|
return hashes
|
||||||
|
}
|
||||||
|
|
||||||
|
// getQuestion combines hexadecimal encoded prefixes of hashed hostnames into
|
||||||
|
// string.
|
||||||
|
func (c *Checker) getQuestion(hashes []hostnameHash) (q string) {
|
||||||
|
b := &strings.Builder{}
|
||||||
|
|
||||||
|
for _, hash := range hashes {
|
||||||
|
// nolint:looppointer // The subsilce is used for safe hex encoding.
|
||||||
|
stringutil.WriteToBuilder(b, hex.EncodeToString(hash[:prefixLen]), ".")
|
||||||
|
}
|
||||||
|
|
||||||
|
stringutil.WriteToBuilder(b, c.txtSuffix)
|
||||||
|
|
||||||
|
return b.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// processAnswer returns true if DNS response matches the hash, and received
|
||||||
|
// hashed hostnames from the upstream.
|
||||||
|
func (c *Checker) processAnswer(
|
||||||
|
hashesToRequest []hostnameHash,
|
||||||
|
resp *dns.Msg,
|
||||||
|
host string,
|
||||||
|
) (matched bool, receivedHashes []hostnameHash) {
|
||||||
|
txtCount := 0
|
||||||
|
|
||||||
|
for _, a := range resp.Answer {
|
||||||
|
txt, ok := a.(*dns.TXT)
|
||||||
|
if !ok {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
txtCount++
|
||||||
|
|
||||||
|
receivedHashes = c.appendHashesFromTXT(receivedHashes, txt, host)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debug("%s: received answer for %s with %d TXT count", c.svc, host, txtCount)
|
||||||
|
|
||||||
|
matched = findMatch(hashesToRequest, receivedHashes)
|
||||||
|
if matched {
|
||||||
|
log.Debug("%s: matched %s", c.svc, host)
|
||||||
|
|
||||||
|
return true, receivedHashes
|
||||||
|
}
|
||||||
|
|
||||||
|
return false, receivedHashes
|
||||||
|
}
|
||||||
|
|
||||||
|
// appendHashesFromTXT appends received hashed hostnames.
|
||||||
|
func (c *Checker) appendHashesFromTXT(
|
||||||
|
hashes []hostnameHash,
|
||||||
|
txt *dns.TXT,
|
||||||
|
host string,
|
||||||
|
) (receivedHashes []hostnameHash) {
|
||||||
|
log.Debug("%s: received hashes for %s: %v", c.svc, host, txt.Txt)
|
||||||
|
|
||||||
|
for _, t := range txt.Txt {
|
||||||
|
if len(t) != hexSize {
|
||||||
|
log.Debug("%s: wrong hex size %d for %s %s", c.svc, len(t), host, t)
|
||||||
|
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
buf, err := hex.DecodeString(t)
|
||||||
|
if err != nil {
|
||||||
|
log.Debug("%s: decoding hex string %s: %s", c.svc, t, err)
|
||||||
|
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
var hash hostnameHash
|
||||||
|
copy(hash[:], buf)
|
||||||
|
hashes = append(hashes, hash)
|
||||||
|
}
|
||||||
|
|
||||||
|
return hashes
|
||||||
|
}
|
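Putting the new package together, here is a sketch of how a Checker is constructed and queried, based on the constructor above and the test helpers used elsewhere in this change; the service name and TXT suffix values are illustrative:

	package filtering_test

	import (
		"fmt"
		"time"

		"github.com/AdguardTeam/AdGuardHome/internal/aghtest"
		"github.com/AdguardTeam/AdGuardHome/internal/filtering/hashprefix"
	)

	// demoCheck shows the intended wiring: build a checker over an
	// upstream and ask it whether a host should be blocked.  The upstream
	// here is the test helper from this change, which "blocks" exactly
	// one hostname; a real deployment passes a DNS upstream instead.
	func demoCheck() {
		c := hashprefix.New(&hashprefix.Config{
			Upstream:    aghtest.NewBlockUpstream("bad.example", true),
			ServiceName: "SafeBrowsing",        // illustrative
			TXTSuffix:   "sb.dns.adguard.com.", // value used in the tests
			CacheTime:   10 * time.Minute,
			CacheSize:   10_000,
		})

		blocked, err := c.Check("bad.example")
		fmt.Println(blocked, err) // expected: true <nil>
	}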
internal/filtering/hashprefix/hashprefix_internal_test.go (new file, 248 lines)
|
@ -0,0 +1,248 @@
|
||||||
|
package hashprefix
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/sha256"
|
||||||
|
"encoding/hex"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/AdguardTeam/AdGuardHome/internal/aghtest"
|
||||||
|
"github.com/AdguardTeam/golibs/cache"
|
||||||
|
"github.com/miekg/dns"
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
"golang.org/x/exp/slices"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
cacheTime = 10 * time.Minute
|
||||||
|
cacheSize = 10000
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestChcker_getQuestion(t *testing.T) {
|
||||||
|
const suf = "sb.dns.adguard.com."
|
||||||
|
|
||||||
|
// test hostnameToHashes()
|
||||||
|
hashes := hostnameToHashes("1.2.3.sub.host.com")
|
||||||
|
assert.Len(t, hashes, 3)
|
||||||
|
|
||||||
|
hash := sha256.Sum256([]byte("3.sub.host.com"))
|
||||||
|
hexPref1 := hex.EncodeToString(hash[:prefixLen])
|
||||||
|
assert.True(t, slices.Contains(hashes, hash))
|
||||||
|
|
||||||
|
hash = sha256.Sum256([]byte("sub.host.com"))
|
||||||
|
hexPref2 := hex.EncodeToString(hash[:prefixLen])
|
||||||
|
assert.True(t, slices.Contains(hashes, hash))
|
||||||
|
|
||||||
|
hash = sha256.Sum256([]byte("host.com"))
|
||||||
|
hexPref3 := hex.EncodeToString(hash[:prefixLen])
|
||||||
|
assert.True(t, slices.Contains(hashes, hash))
|
||||||
|
|
||||||
|
hash = sha256.Sum256([]byte("com"))
|
||||||
|
assert.False(t, slices.Contains(hashes, hash))
|
||||||
|
|
||||||
|
c := &Checker{
|
||||||
|
svc: "SafeBrowsing",
|
||||||
|
txtSuffix: suf,
|
||||||
|
}
|
||||||
|
|
||||||
|
q := c.getQuestion(hashes)
|
||||||
|
|
||||||
|
assert.Contains(t, q, hexPref1)
|
||||||
|
assert.Contains(t, q, hexPref2)
|
||||||
|
assert.Contains(t, q, hexPref3)
|
||||||
|
assert.True(t, strings.HasSuffix(q, suf))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHostnameToHashes(t *testing.T) {
|
||||||
|
testCases := []struct {
|
||||||
|
name string
|
||||||
|
host string
|
||||||
|
wantLen int
|
||||||
|
}{{
|
||||||
|
name: "basic",
|
||||||
|
host: "example.com",
|
||||||
|
wantLen: 1,
|
||||||
|
}, {
|
||||||
|
name: "sub_basic",
|
||||||
|
host: "www.example.com",
|
||||||
|
wantLen: 2,
|
||||||
|
}, {
|
||||||
|
name: "private_domain",
|
||||||
|
host: "foo.co.uk",
|
||||||
|
wantLen: 1,
|
||||||
|
}, {
|
||||||
|
name: "sub_private_domain",
|
||||||
|
host: "bar.foo.co.uk",
|
||||||
|
wantLen: 2,
|
||||||
|
}, {
|
||||||
|
name: "private_domain_v2",
|
||||||
|
host: "foo.blogspot.co.uk",
|
||||||
|
wantLen: 4,
|
||||||
|
}, {
|
||||||
|
name: "sub_private_domain_v2",
|
||||||
|
host: "bar.foo.blogspot.co.uk",
|
||||||
|
wantLen: 4,
|
||||||
|
}}
|
||||||
|
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
hashes := hostnameToHashes(tc.host)
|
||||||
|
assert.Len(t, hashes, tc.wantLen)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestChecker_storeInCache(t *testing.T) {
|
||||||
|
c := &Checker{
|
||||||
|
svc: "SafeBrowsing",
|
||||||
|
cacheTime: cacheTime,
|
||||||
|
}
|
||||||
|
conf := cache.Config{}
|
||||||
|
c.cache = cache.New(conf)
|
||||||
|
|
||||||
|
// store in cache hashes for "3.sub.host.com" and "host.com"
|
||||||
|
// and empty data for hash-prefix for "sub.host.com"
|
||||||
|
hashes := []hostnameHash{}
|
||||||
|
hash := sha256.Sum256([]byte("sub.host.com"))
|
||||||
|
hashes = append(hashes, hash)
|
||||||
|
var hashesArray []hostnameHash
|
||||||
|
hash4 := sha256.Sum256([]byte("3.sub.host.com"))
|
||||||
|
hashesArray = append(hashesArray, hash4)
|
||||||
|
    hash2 := sha256.Sum256([]byte("host.com"))
    hashesArray = append(hashesArray, hash2)
    c.storeInCache(hashes, hashesArray)

    // match "3.sub.host.com" or "host.com" from cache
    hashes = []hostnameHash{}
    hash = sha256.Sum256([]byte("3.sub.host.com"))
    hashes = append(hashes, hash)
    hash = sha256.Sum256([]byte("sub.host.com"))
    hashes = append(hashes, hash)
    hash = sha256.Sum256([]byte("host.com"))
    hashes = append(hashes, hash)
    found, blocked, _ := c.findInCache(hashes)
    assert.True(t, found)
    assert.True(t, blocked)

    // match "sub.host.com" from cache
    hashes = []hostnameHash{}
    hash = sha256.Sum256([]byte("sub.host.com"))
    hashes = append(hashes, hash)
    found, blocked, _ = c.findInCache(hashes)
    assert.True(t, found)
    assert.False(t, blocked)

    // Match "sub.host.com" from cache. Another hash for "host.example" is not
    // in the cache, so get data for it from the server.
    hashes = []hostnameHash{}
    hash = sha256.Sum256([]byte("sub.host.com"))
    hashes = append(hashes, hash)
    hash = sha256.Sum256([]byte("host.example"))
    hashes = append(hashes, hash)
    found, _, hashesToRequest := c.findInCache(hashes)
    assert.False(t, found)

    hash = sha256.Sum256([]byte("sub.host.com"))
    ok := slices.Contains(hashesToRequest, hash)
    assert.False(t, ok)

    hash = sha256.Sum256([]byte("host.example"))
    ok = slices.Contains(hashesToRequest, hash)
    assert.True(t, ok)

    c = &Checker{
        svc:       "SafeBrowsing",
        cacheTime: cacheTime,
    }
    c.cache = cache.New(cache.Config{})

    hashes = []hostnameHash{}
    hash = sha256.Sum256([]byte("sub.host.com"))
    hashes = append(hashes, hash)

    c.cache.Set(hash[:prefixLen], make([]byte, expirySize+hashSize))
    found, _, _ = c.findInCache(hashes)
    assert.False(t, found)
}

func TestChecker_Check(t *testing.T) {
    const hostname = "example.org"

    testCases := []struct {
        name      string
        wantBlock bool
    }{{
        name:      "sb_no_block",
        wantBlock: false,
    }, {
        name:      "sb_block",
        wantBlock: true,
    }, {
        name:      "pc_no_block",
        wantBlock: false,
    }, {
        name:      "pc_block",
        wantBlock: true,
    }}

    for _, tc := range testCases {
        c := New(&Config{
            CacheTime: cacheTime,
            CacheSize: cacheSize,
        })

        // Prepare the upstream.
        ups := aghtest.NewBlockUpstream(hostname, tc.wantBlock)

        var numReq int
        onExchange := ups.OnExchange
        ups.OnExchange = func(req *dns.Msg) (resp *dns.Msg, err error) {
            numReq++

            return onExchange(req)
        }

        c.upstream = ups

        t.Run(tc.name, func(t *testing.T) {
            // Firstly, check the request blocking.
            hits := 0
            res := false
            res, err := c.Check(hostname)
            require.NoError(t, err)

            if tc.wantBlock {
                assert.True(t, res)
                hits++
            } else {
                require.False(t, res)
            }

            // Check the cache state, check the response is now cached.
            assert.Equal(t, 1, c.cache.Stats().Count)
            assert.Equal(t, hits, c.cache.Stats().Hit)

            // There was one request to an upstream.
            assert.Equal(t, 1, numReq)

            // Now make the same request to check the cache was used.
            res, err = c.Check(hostname)
            require.NoError(t, err)

            if tc.wantBlock {
                assert.True(t, res)
            } else {
                require.False(t, res)
            }

            // Check the cache state, it should've been used.
            assert.Equal(t, 1, c.cache.Stats().Count)
            assert.Equal(t, hits+1, c.cache.Stats().Hit)

            // Check that there were no additional requests.
            assert.Equal(t, 1, numReq)
        })
    }
}

@@ -8,6 +8,7 @@ import (
    "net/url"
    "os"
    "path/filepath"
    "sync"
    "time"

    "github.com/AdguardTeam/AdGuardHome/internal/aghhttp"
@@ -458,6 +459,80 @@ func (d *DNSFilter) handleCheckHost(w http.ResponseWriter, r *http.Request) {
    _ = aghhttp.WriteJSONResponse(w, r, resp)
}

// setProtectedBool sets the value of a boolean pointer under a lock. l must
// protect the value under ptr.
//
// TODO(e.burkov): Make it generic?
func setProtectedBool(mu *sync.RWMutex, ptr *bool, val bool) {
    mu.Lock()
    defer mu.Unlock()

    *ptr = val
}

// protectedBool gets the value of a boolean pointer under a read lock. l must
// protect the value under ptr.
//
// TODO(e.burkov): Make it generic?
func protectedBool(mu *sync.RWMutex, ptr *bool) (val bool) {
    mu.RLock()
    defer mu.RUnlock()

    return *ptr
}

// handleSafeBrowsingEnable is the handler for the POST
// /control/safebrowsing/enable HTTP API.
func (d *DNSFilter) handleSafeBrowsingEnable(w http.ResponseWriter, r *http.Request) {
    setProtectedBool(&d.confLock, &d.Config.SafeBrowsingEnabled, true)
    d.Config.ConfigModified()
}

// handleSafeBrowsingDisable is the handler for the POST
// /control/safebrowsing/disable HTTP API.
func (d *DNSFilter) handleSafeBrowsingDisable(w http.ResponseWriter, r *http.Request) {
    setProtectedBool(&d.confLock, &d.Config.SafeBrowsingEnabled, false)
    d.Config.ConfigModified()
}

// handleSafeBrowsingStatus is the handler for the GET
// /control/safebrowsing/status HTTP API.
func (d *DNSFilter) handleSafeBrowsingStatus(w http.ResponseWriter, r *http.Request) {
    resp := &struct {
        Enabled bool `json:"enabled"`
    }{
        Enabled: protectedBool(&d.confLock, &d.Config.SafeBrowsingEnabled),
    }

    _ = aghhttp.WriteJSONResponse(w, r, resp)
}

// handleParentalEnable is the handler for the POST /control/parental/enable
// HTTP API.
func (d *DNSFilter) handleParentalEnable(w http.ResponseWriter, r *http.Request) {
    setProtectedBool(&d.confLock, &d.Config.ParentalEnabled, true)
    d.Config.ConfigModified()
}

// handleParentalDisable is the handler for the POST /control/parental/disable
// HTTP API.
func (d *DNSFilter) handleParentalDisable(w http.ResponseWriter, r *http.Request) {
    setProtectedBool(&d.confLock, &d.Config.ParentalEnabled, false)
    d.Config.ConfigModified()
}

// handleParentalStatus is the handler for the GET /control/parental/status
// HTTP API.
func (d *DNSFilter) handleParentalStatus(w http.ResponseWriter, r *http.Request) {
    resp := &struct {
        Enabled bool `json:"enabled"`
    }{
        Enabled: protectedBool(&d.confLock, &d.Config.ParentalEnabled),
    }

    _ = aghhttp.WriteJSONResponse(w, r, resp)
}

// RegisterFilteringHandlers - register handlers
func (d *DNSFilter) RegisterFilteringHandlers() {
    registerHTTP := d.HTTPRegister
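Both helpers above carry a TODO about making them generic. A minimal sketch of what a type-parameterized variant could look like on Go 1.18+; this is purely hypothetical and not part of this change:

package main

import (
    "fmt"
    "sync"
)

// setProtected writes *ptr while holding mu, like setProtectedBool above.
func setProtected[T any](mu *sync.RWMutex, ptr *T, val T) {
    mu.Lock()
    defer mu.Unlock()

    *ptr = val
}

// protected reads *ptr under a read lock, like protectedBool above.
func protected[T any](mu *sync.RWMutex, ptr *T) (val T) {
    mu.RLock()
    defer mu.RUnlock()

    return *ptr
}

func main() {
    var mu sync.RWMutex
    enabled := false

    setProtected(&mu, &enabled, true)
    fmt.Println(protected(&mu, &enabled)) // true
}
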
@@ -8,6 +8,7 @@ import (
    "testing"
    "time"

    "github.com/AdguardTeam/golibs/testutil"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)
@@ -136,3 +137,171 @@ func TestDNSFilter_handleFilteringSetURL(t *testing.T) {
        })
    }
}

func TestDNSFilter_handleSafeBrowsingStatus(t *testing.T) {
    const (
        testTimeout = time.Second
        statusURL   = "/control/safebrowsing/status"
    )

    confModCh := make(chan struct{})
    filtersDir := t.TempDir()

    testCases := []struct {
        name       string
        url        string
        enabled    bool
        wantStatus assert.BoolAssertionFunc
    }{{
        name:       "enable_off",
        url:        "/control/safebrowsing/enable",
        enabled:    false,
        wantStatus: assert.True,
    }, {
        name:       "enable_on",
        url:        "/control/safebrowsing/enable",
        enabled:    true,
        wantStatus: assert.True,
    }, {
        name:       "disable_on",
        url:        "/control/safebrowsing/disable",
        enabled:    true,
        wantStatus: assert.False,
    }, {
        name:       "disable_off",
        url:        "/control/safebrowsing/disable",
        enabled:    false,
        wantStatus: assert.False,
    }}

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            handlers := make(map[string]http.Handler)

            d, err := New(&Config{
                ConfigModified: func() {
                    testutil.RequireSend(testutil.PanicT{}, confModCh, struct{}{}, testTimeout)
                },
                DataDir: filtersDir,
                HTTPRegister: func(_, url string, handler http.HandlerFunc) {
                    handlers[url] = handler
                },
                SafeBrowsingEnabled: tc.enabled,
            }, nil)
            require.NoError(t, err)
            t.Cleanup(d.Close)

            d.RegisterFilteringHandlers()
            require.NotEmpty(t, handlers)
            require.Contains(t, handlers, statusURL)

            r := httptest.NewRequest(http.MethodPost, tc.url, nil)
            w := httptest.NewRecorder()

            go handlers[tc.url].ServeHTTP(w, r)

            testutil.RequireReceive(t, confModCh, testTimeout)

            r = httptest.NewRequest(http.MethodGet, statusURL, nil)
            w = httptest.NewRecorder()

            handlers[statusURL].ServeHTTP(w, r)
            require.Equal(t, http.StatusOK, w.Code)

            status := struct {
                Enabled bool `json:"enabled"`
            }{
                Enabled: false,
            }

            err = json.NewDecoder(w.Body).Decode(&status)
            require.NoError(t, err)

            tc.wantStatus(t, status.Enabled)
        })
    }
}

func TestDNSFilter_handleParentalStatus(t *testing.T) {
    const (
        testTimeout = time.Second
        statusURL   = "/control/parental/status"
    )

    confModCh := make(chan struct{})
    filtersDir := t.TempDir()

    testCases := []struct {
        name       string
        url        string
        enabled    bool
        wantStatus assert.BoolAssertionFunc
    }{{
        name:       "enable_off",
        url:        "/control/parental/enable",
        enabled:    false,
        wantStatus: assert.True,
    }, {
        name:       "enable_on",
        url:        "/control/parental/enable",
        enabled:    true,
        wantStatus: assert.True,
    }, {
        name:       "disable_on",
        url:        "/control/parental/disable",
        enabled:    true,
        wantStatus: assert.False,
    }, {
        name:       "disable_off",
        url:        "/control/parental/disable",
        enabled:    false,
        wantStatus: assert.False,
    }}

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            handlers := make(map[string]http.Handler)

            d, err := New(&Config{
                ConfigModified: func() {
                    testutil.RequireSend(testutil.PanicT{}, confModCh, struct{}{}, testTimeout)
                },
                DataDir: filtersDir,
                HTTPRegister: func(_, url string, handler http.HandlerFunc) {
                    handlers[url] = handler
                },
                ParentalEnabled: tc.enabled,
            }, nil)
            require.NoError(t, err)
            t.Cleanup(d.Close)

            d.RegisterFilteringHandlers()
            require.NotEmpty(t, handlers)
            require.Contains(t, handlers, statusURL)

            r := httptest.NewRequest(http.MethodPost, tc.url, nil)
            w := httptest.NewRecorder()

            go handlers[tc.url].ServeHTTP(w, r)

            testutil.RequireReceive(t, confModCh, testTimeout)

            r = httptest.NewRequest(http.MethodGet, statusURL, nil)
            w = httptest.NewRecorder()

            handlers[statusURL].ServeHTTP(w, r)
            require.Equal(t, http.StatusOK, w.Code)

            status := struct {
                Enabled bool `json:"enabled"`
            }{
                Enabled: false,
            }

            err = json.NewDecoder(w.Body).Decode(&status)
            require.NoError(t, err)

            tc.wantStatus(t, status.Enabled)
        })
    }
}
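The two tests above drive the new enable/disable/status handlers through RegisterFilteringHandlers. For illustration only, here is a hypothetical client-side sketch of the same HTTP API in Go; the address, port, and the absence of authentication are assumptions, not part of this change:

package main

import (
    "encoding/json"
    "fmt"
    "net/http"
)

func main() {
    // Assumed local AdGuard Home instance; adjust address and auth as needed.
    const base = "http://127.0.0.1:3000"

    // Enable safe browsing, then read the status back.
    resp, err := http.Post(base+"/control/safebrowsing/enable", "application/json", nil)
    if err != nil {
        panic(err)
    }
    resp.Body.Close()

    resp, err = http.Get(base + "/control/safebrowsing/status")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()

    status := struct {
        Enabled bool `json:"enabled"`
    }{}
    if err = json.NewDecoder(resp.Body).Decode(&status); err != nil {
        panic(err)
    }

    fmt.Println("safe browsing enabled:", status.Enabled)
}
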
@ -1,433 +0,0 @@
|
||||||
package filtering
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"crypto/sha256"
|
|
||||||
"encoding/binary"
|
|
||||||
"encoding/hex"
|
|
||||||
"fmt"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/AdguardTeam/AdGuardHome/internal/aghhttp"
|
|
||||||
"github.com/AdguardTeam/dnsproxy/upstream"
|
|
||||||
"github.com/AdguardTeam/golibs/cache"
|
|
||||||
"github.com/AdguardTeam/golibs/log"
|
|
||||||
"github.com/AdguardTeam/golibs/stringutil"
|
|
||||||
"github.com/miekg/dns"
|
|
||||||
"golang.org/x/exp/slices"
|
|
||||||
"golang.org/x/net/publicsuffix"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Safe browsing and parental control methods.
|
|
||||||
|
|
||||||
// TODO(a.garipov): Make configurable.
|
|
||||||
const (
|
|
||||||
dnsTimeout = 3 * time.Second
|
|
||||||
defaultSafebrowsingServer = `https://family.adguard-dns.com/dns-query`
|
|
||||||
defaultParentalServer = `https://family.adguard-dns.com/dns-query`
|
|
||||||
sbTXTSuffix = `sb.dns.adguard.com.`
|
|
||||||
pcTXTSuffix = `pc.dns.adguard.com.`
|
|
||||||
)
|
|
||||||
|
|
||||||
// SetParentalUpstream sets the parental upstream for *DNSFilter.
|
|
||||||
//
|
|
||||||
// TODO(e.burkov): Remove this in v1 API to forbid the direct access.
|
|
||||||
func (d *DNSFilter) SetParentalUpstream(u upstream.Upstream) {
|
|
||||||
d.parentalUpstream = u
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetSafeBrowsingUpstream sets the safe browsing upstream for *DNSFilter.
|
|
||||||
//
|
|
||||||
// TODO(e.burkov): Remove this in v1 API to forbid the direct access.
|
|
||||||
func (d *DNSFilter) SetSafeBrowsingUpstream(u upstream.Upstream) {
|
|
||||||
d.safeBrowsingUpstream = u
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DNSFilter) initSecurityServices() error {
|
|
||||||
var err error
|
|
||||||
d.safeBrowsingServer = defaultSafebrowsingServer
|
|
||||||
d.parentalServer = defaultParentalServer
|
|
||||||
opts := &upstream.Options{
|
|
||||||
Timeout: dnsTimeout,
|
|
||||||
ServerIPAddrs: []net.IP{
|
|
||||||
{94, 140, 14, 15},
|
|
||||||
{94, 140, 15, 16},
|
|
||||||
net.ParseIP("2a10:50c0::bad1:ff"),
|
|
||||||
net.ParseIP("2a10:50c0::bad2:ff"),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
parUps, err := upstream.AddressToUpstream(d.parentalServer, opts)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("converting parental server: %w", err)
|
|
||||||
}
|
|
||||||
d.SetParentalUpstream(parUps)
|
|
||||||
|
|
||||||
sbUps, err := upstream.AddressToUpstream(d.safeBrowsingServer, opts)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("converting safe browsing server: %w", err)
|
|
||||||
}
|
|
||||||
d.SetSafeBrowsingUpstream(sbUps)
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
|
||||||
expire byte[4]
|
|
||||||
hash byte[32]
|
|
||||||
...
|
|
||||||
*/
|
|
||||||
func (c *sbCtx) setCache(prefix, hashes []byte) {
|
|
||||||
d := make([]byte, 4+len(hashes))
|
|
||||||
expire := uint(time.Now().Unix()) + c.cacheTime*60
|
|
||||||
binary.BigEndian.PutUint32(d[:4], uint32(expire))
|
|
||||||
copy(d[4:], hashes)
|
|
||||||
c.cache.Set(prefix, d)
|
|
||||||
log.Debug("%s: stored in cache: %v", c.svc, prefix)
|
|
||||||
}
|
|
||||||
|
|
||||||
// findInHash returns 32-byte hash if it's found in hashToHost.
|
|
||||||
func (c *sbCtx) findInHash(val []byte) (hash32 [32]byte, found bool) {
|
|
||||||
for i := 4; i < len(val); i += 32 {
|
|
||||||
hash := val[i : i+32]
|
|
||||||
|
|
||||||
copy(hash32[:], hash[0:32])
|
|
||||||
|
|
||||||
_, found = c.hashToHost[hash32]
|
|
||||||
if found {
|
|
||||||
return hash32, found
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return [32]byte{}, false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *sbCtx) getCached() int {
|
|
||||||
now := time.Now().Unix()
|
|
||||||
hashesToRequest := map[[32]byte]string{}
|
|
||||||
for k, v := range c.hashToHost {
|
|
||||||
// nolint:looppointer // The subsilce is used for a safe cache lookup.
|
|
||||||
val := c.cache.Get(k[0:2])
|
|
||||||
if val == nil || now >= int64(binary.BigEndian.Uint32(val)) {
|
|
||||||
hashesToRequest[k] = v
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if hash32, found := c.findInHash(val); found {
|
|
||||||
log.Debug("%s: found in cache: %s: blocked by %v", c.svc, c.host, hash32)
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(hashesToRequest) == 0 {
|
|
||||||
log.Debug("%s: found in cache: %s: not blocked", c.svc, c.host)
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
|
|
||||||
c.hashToHost = hashesToRequest
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
type sbCtx struct {
|
|
||||||
host string
|
|
||||||
svc string
|
|
||||||
hashToHost map[[32]byte]string
|
|
||||||
cache cache.Cache
|
|
||||||
cacheTime uint
|
|
||||||
}
|
|
||||||
|
|
||||||
func hostnameToHashes(host string) map[[32]byte]string {
|
|
||||||
hashes := map[[32]byte]string{}
|
|
||||||
tld, icann := publicsuffix.PublicSuffix(host)
|
|
||||||
if !icann {
|
|
||||||
// private suffixes like cloudfront.net
|
|
||||||
tld = ""
|
|
||||||
}
|
|
||||||
curhost := host
|
|
||||||
|
|
||||||
nDots := 0
|
|
||||||
for i := len(curhost) - 1; i >= 0; i-- {
|
|
||||||
if curhost[i] == '.' {
|
|
||||||
nDots++
|
|
||||||
if nDots == 4 {
|
|
||||||
curhost = curhost[i+1:] // "xxx.a.b.c.d" -> "a.b.c.d"
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for {
|
|
||||||
if curhost == "" {
|
|
||||||
// we've reached end of string
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if tld != "" && curhost == tld {
|
|
||||||
// we've reached the TLD, don't hash it
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
sum := sha256.Sum256([]byte(curhost))
|
|
||||||
hashes[sum] = curhost
|
|
||||||
|
|
||||||
pos := strings.IndexByte(curhost, byte('.'))
|
|
||||||
if pos < 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
curhost = curhost[pos+1:]
|
|
||||||
}
|
|
||||||
return hashes
|
|
||||||
}
|
|
||||||
|
|
||||||
// convert hash array to string
|
|
||||||
func (c *sbCtx) getQuestion() string {
|
|
||||||
b := &strings.Builder{}
|
|
||||||
|
|
||||||
for hash := range c.hashToHost {
|
|
||||||
// nolint:looppointer // The subsilce is used for safe hex encoding.
|
|
||||||
stringutil.WriteToBuilder(b, hex.EncodeToString(hash[0:2]), ".")
|
|
||||||
}
|
|
||||||
|
|
||||||
if c.svc == "SafeBrowsing" {
|
|
||||||
stringutil.WriteToBuilder(b, sbTXTSuffix)
|
|
||||||
|
|
||||||
return b.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
stringutil.WriteToBuilder(b, pcTXTSuffix)
|
|
||||||
|
|
||||||
return b.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Find the target hash in TXT response
|
|
||||||
func (c *sbCtx) processTXT(resp *dns.Msg) (bool, [][]byte) {
|
|
||||||
matched := false
|
|
||||||
hashes := [][]byte{}
|
|
||||||
for _, a := range resp.Answer {
|
|
||||||
txt, ok := a.(*dns.TXT)
|
|
||||||
if !ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
log.Debug("%s: received hashes for %s: %v", c.svc, c.host, txt.Txt)
|
|
||||||
|
|
||||||
for _, t := range txt.Txt {
|
|
||||||
if len(t) != 32*2 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
hash, err := hex.DecodeString(t)
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
hashes = append(hashes, hash)
|
|
||||||
|
|
||||||
if !matched {
|
|
||||||
var hash32 [32]byte
|
|
||||||
copy(hash32[:], hash)
|
|
||||||
|
|
||||||
var hashHost string
|
|
||||||
hashHost, ok = c.hashToHost[hash32]
|
|
||||||
if ok {
|
|
||||||
log.Debug("%s: matched %s by %s/%s", c.svc, c.host, hashHost, t)
|
|
||||||
matched = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return matched, hashes
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *sbCtx) storeCache(hashes [][]byte) {
|
|
||||||
slices.SortFunc(hashes, func(a, b []byte) (sortsBefore bool) {
|
|
||||||
return bytes.Compare(a, b) == -1
|
|
||||||
})
|
|
||||||
|
|
||||||
var curData []byte
|
|
||||||
var prevPrefix []byte
|
|
||||||
for i, hash := range hashes {
|
|
||||||
// nolint:looppointer // The subsilce is used for a safe comparison.
|
|
||||||
if !bytes.Equal(hash[0:2], prevPrefix) {
|
|
||||||
if i != 0 {
|
|
||||||
c.setCache(prevPrefix, curData)
|
|
||||||
curData = nil
|
|
||||||
}
|
|
||||||
prevPrefix = hashes[i][0:2]
|
|
||||||
}
|
|
||||||
curData = append(curData, hash...)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(prevPrefix) != 0 {
|
|
||||||
c.setCache(prevPrefix, curData)
|
|
||||||
}
|
|
||||||
|
|
||||||
for hash := range c.hashToHost {
|
|
||||||
// nolint:looppointer // The subsilce is used for a safe cache lookup.
|
|
||||||
prefix := hash[0:2]
|
|
||||||
val := c.cache.Get(prefix)
|
|
||||||
if val == nil {
|
|
||||||
c.setCache(prefix, nil)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func check(c *sbCtx, r Result, u upstream.Upstream) (Result, error) {
|
|
||||||
c.hashToHost = hostnameToHashes(c.host)
|
|
||||||
switch c.getCached() {
|
|
||||||
case -1:
|
|
||||||
return Result{}, nil
|
|
||||||
case 1:
|
|
||||||
return r, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
question := c.getQuestion()
|
|
||||||
|
|
||||||
log.Tracef("%s: checking %s: %s", c.svc, c.host, question)
|
|
||||||
req := (&dns.Msg{}).SetQuestion(question, dns.TypeTXT)
|
|
||||||
|
|
||||||
resp, err := u.Exchange(req)
|
|
||||||
if err != nil {
|
|
||||||
return Result{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
matched, receivedHashes := c.processTXT(resp)
|
|
||||||
|
|
||||||
c.storeCache(receivedHashes)
|
|
||||||
if matched {
|
|
||||||
return r, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return Result{}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO(a.garipov): Unify with checkParental.
|
|
||||||
func (d *DNSFilter) checkSafeBrowsing(
|
|
||||||
host string,
|
|
||||||
_ uint16,
|
|
||||||
setts *Settings,
|
|
||||||
) (res Result, err error) {
|
|
||||||
if !setts.ProtectionEnabled || !setts.SafeBrowsingEnabled {
|
|
||||||
return Result{}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if log.GetLevel() >= log.DEBUG {
|
|
||||||
timer := log.StartTimer()
|
|
||||||
defer timer.LogElapsed("safebrowsing lookup for %q", host)
|
|
||||||
}
|
|
||||||
|
|
||||||
sctx := &sbCtx{
|
|
||||||
host: host,
|
|
||||||
svc: "SafeBrowsing",
|
|
||||||
cache: d.safebrowsingCache,
|
|
||||||
cacheTime: d.Config.CacheTime,
|
|
||||||
}
|
|
||||||
|
|
||||||
res = Result{
|
|
||||||
Rules: []*ResultRule{{
|
|
||||||
Text: "adguard-malware-shavar",
|
|
||||||
FilterListID: SafeBrowsingListID,
|
|
||||||
}},
|
|
||||||
Reason: FilteredSafeBrowsing,
|
|
||||||
IsFiltered: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
return check(sctx, res, d.safeBrowsingUpstream)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO(a.garipov): Unify with checkSafeBrowsing.
|
|
||||||
func (d *DNSFilter) checkParental(
|
|
||||||
host string,
|
|
||||||
_ uint16,
|
|
||||||
setts *Settings,
|
|
||||||
) (res Result, err error) {
|
|
||||||
if !setts.ProtectionEnabled || !setts.ParentalEnabled {
|
|
||||||
return Result{}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if log.GetLevel() >= log.DEBUG {
|
|
||||||
timer := log.StartTimer()
|
|
||||||
defer timer.LogElapsed("parental lookup for %q", host)
|
|
||||||
}
|
|
||||||
|
|
||||||
sctx := &sbCtx{
|
|
||||||
host: host,
|
|
||||||
svc: "Parental",
|
|
||||||
cache: d.parentalCache,
|
|
||||||
cacheTime: d.Config.CacheTime,
|
|
||||||
}
|
|
||||||
|
|
||||||
res = Result{
|
|
||||||
Rules: []*ResultRule{{
|
|
||||||
Text: "parental CATEGORY_BLACKLISTED",
|
|
||||||
FilterListID: ParentalListID,
|
|
||||||
}},
|
|
||||||
Reason: FilteredParental,
|
|
||||||
IsFiltered: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
return check(sctx, res, d.parentalUpstream)
|
|
||||||
}
|
|
||||||
|
|
||||||
// setProtectedBool sets the value of a boolean pointer under a lock. l must
|
|
||||||
// protect the value under ptr.
|
|
||||||
//
|
|
||||||
// TODO(e.burkov): Make it generic?
|
|
||||||
func setProtectedBool(mu *sync.RWMutex, ptr *bool, val bool) {
|
|
||||||
mu.Lock()
|
|
||||||
defer mu.Unlock()
|
|
||||||
|
|
||||||
*ptr = val
|
|
||||||
}
|
|
||||||
|
|
||||||
// protectedBool gets the value of a boolean pointer under a read lock. l must
|
|
||||||
// protect the value under ptr.
|
|
||||||
//
|
|
||||||
// TODO(e.burkov): Make it generic?
|
|
||||||
func protectedBool(mu *sync.RWMutex, ptr *bool) (val bool) {
|
|
||||||
mu.RLock()
|
|
||||||
defer mu.RUnlock()
|
|
||||||
|
|
||||||
return *ptr
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DNSFilter) handleSafeBrowsingEnable(w http.ResponseWriter, r *http.Request) {
|
|
||||||
setProtectedBool(&d.confLock, &d.Config.SafeBrowsingEnabled, true)
|
|
||||||
d.Config.ConfigModified()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DNSFilter) handleSafeBrowsingDisable(w http.ResponseWriter, r *http.Request) {
|
|
||||||
setProtectedBool(&d.confLock, &d.Config.SafeBrowsingEnabled, false)
|
|
||||||
d.Config.ConfigModified()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DNSFilter) handleSafeBrowsingStatus(w http.ResponseWriter, r *http.Request) {
|
|
||||||
resp := &struct {
|
|
||||||
Enabled bool `json:"enabled"`
|
|
||||||
}{
|
|
||||||
Enabled: protectedBool(&d.confLock, &d.Config.SafeBrowsingEnabled),
|
|
||||||
}
|
|
||||||
|
|
||||||
_ = aghhttp.WriteJSONResponse(w, r, resp)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DNSFilter) handleParentalEnable(w http.ResponseWriter, r *http.Request) {
|
|
||||||
setProtectedBool(&d.confLock, &d.Config.ParentalEnabled, true)
|
|
||||||
d.Config.ConfigModified()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DNSFilter) handleParentalDisable(w http.ResponseWriter, r *http.Request) {
|
|
||||||
setProtectedBool(&d.confLock, &d.Config.ParentalEnabled, false)
|
|
||||||
d.Config.ConfigModified()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *DNSFilter) handleParentalStatus(w http.ResponseWriter, r *http.Request) {
|
|
||||||
resp := &struct {
|
|
||||||
Enabled bool `json:"enabled"`
|
|
||||||
}{
|
|
||||||
Enabled: protectedBool(&d.confLock, &d.Config.ParentalEnabled),
|
|
||||||
}
|
|
||||||
|
|
||||||
_ = aghhttp.WriteJSONResponse(w, r, resp)
|
|
||||||
}
|
|
|
@ -1,226 +0,0 @@
|
||||||
package filtering
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/sha256"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
|
|
||||||
"github.com/AdguardTeam/AdGuardHome/internal/aghtest"
|
|
||||||
"github.com/AdguardTeam/golibs/cache"
|
|
||||||
"github.com/miekg/dns"
|
|
||||||
"github.com/stretchr/testify/assert"
|
|
||||||
"github.com/stretchr/testify/require"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestSafeBrowsingHash(t *testing.T) {
|
|
||||||
// test hostnameToHashes()
|
|
||||||
hashes := hostnameToHashes("1.2.3.sub.host.com")
|
|
||||||
assert.Len(t, hashes, 3)
|
|
||||||
_, ok := hashes[sha256.Sum256([]byte("3.sub.host.com"))]
|
|
||||||
assert.True(t, ok)
|
|
||||||
_, ok = hashes[sha256.Sum256([]byte("sub.host.com"))]
|
|
||||||
assert.True(t, ok)
|
|
||||||
_, ok = hashes[sha256.Sum256([]byte("host.com"))]
|
|
||||||
assert.True(t, ok)
|
|
||||||
_, ok = hashes[sha256.Sum256([]byte("com"))]
|
|
||||||
assert.False(t, ok)
|
|
||||||
|
|
||||||
c := &sbCtx{
|
|
||||||
svc: "SafeBrowsing",
|
|
||||||
hashToHost: hashes,
|
|
||||||
}
|
|
||||||
|
|
||||||
q := c.getQuestion()
|
|
||||||
|
|
||||||
assert.Contains(t, q, "7a1b.")
|
|
||||||
assert.Contains(t, q, "af5a.")
|
|
||||||
assert.Contains(t, q, "eb11.")
|
|
||||||
assert.True(t, strings.HasSuffix(q, "sb.dns.adguard.com."))
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSafeBrowsingCache(t *testing.T) {
|
|
||||||
c := &sbCtx{
|
|
||||||
svc: "SafeBrowsing",
|
|
||||||
cacheTime: 100,
|
|
||||||
}
|
|
||||||
conf := cache.Config{}
|
|
||||||
c.cache = cache.New(conf)
|
|
||||||
|
|
||||||
// store in cache hashes for "3.sub.host.com" and "host.com"
|
|
||||||
// and empty data for hash-prefix for "sub.host.com"
|
|
||||||
hash := sha256.Sum256([]byte("sub.host.com"))
|
|
||||||
c.hashToHost = make(map[[32]byte]string)
|
|
||||||
c.hashToHost[hash] = "sub.host.com"
|
|
||||||
var hashesArray [][]byte
|
|
||||||
hash4 := sha256.Sum256([]byte("3.sub.host.com"))
|
|
||||||
hashesArray = append(hashesArray, hash4[:])
|
|
||||||
hash2 := sha256.Sum256([]byte("host.com"))
|
|
||||||
hashesArray = append(hashesArray, hash2[:])
|
|
||||||
c.storeCache(hashesArray)
|
|
||||||
|
|
||||||
// match "3.sub.host.com" or "host.com" from cache
|
|
||||||
c.hashToHost = make(map[[32]byte]string)
|
|
||||||
hash = sha256.Sum256([]byte("3.sub.host.com"))
|
|
||||||
c.hashToHost[hash] = "3.sub.host.com"
|
|
||||||
hash = sha256.Sum256([]byte("sub.host.com"))
|
|
||||||
c.hashToHost[hash] = "sub.host.com"
|
|
||||||
hash = sha256.Sum256([]byte("host.com"))
|
|
||||||
c.hashToHost[hash] = "host.com"
|
|
||||||
assert.Equal(t, 1, c.getCached())
|
|
||||||
|
|
||||||
// match "sub.host.com" from cache
|
|
||||||
c.hashToHost = make(map[[32]byte]string)
|
|
||||||
hash = sha256.Sum256([]byte("sub.host.com"))
|
|
||||||
c.hashToHost[hash] = "sub.host.com"
|
|
||||||
assert.Equal(t, -1, c.getCached())
|
|
||||||
|
|
||||||
// Match "sub.host.com" from cache. Another hash for "host.example" is not
|
|
||||||
// in the cache, so get data for it from the server.
|
|
||||||
c.hashToHost = make(map[[32]byte]string)
|
|
||||||
hash = sha256.Sum256([]byte("sub.host.com"))
|
|
||||||
c.hashToHost[hash] = "sub.host.com"
|
|
||||||
hash = sha256.Sum256([]byte("host.example"))
|
|
||||||
c.hashToHost[hash] = "host.example"
|
|
||||||
assert.Empty(t, c.getCached())
|
|
||||||
|
|
||||||
hash = sha256.Sum256([]byte("sub.host.com"))
|
|
||||||
_, ok := c.hashToHost[hash]
|
|
||||||
assert.False(t, ok)
|
|
||||||
|
|
||||||
hash = sha256.Sum256([]byte("host.example"))
|
|
||||||
_, ok = c.hashToHost[hash]
|
|
||||||
assert.True(t, ok)
|
|
||||||
|
|
||||||
c = &sbCtx{
|
|
||||||
svc: "SafeBrowsing",
|
|
||||||
cacheTime: 100,
|
|
||||||
}
|
|
||||||
conf = cache.Config{}
|
|
||||||
c.cache = cache.New(conf)
|
|
||||||
|
|
||||||
hash = sha256.Sum256([]byte("sub.host.com"))
|
|
||||||
c.hashToHost = make(map[[32]byte]string)
|
|
||||||
c.hashToHost[hash] = "sub.host.com"
|
|
||||||
|
|
||||||
c.cache.Set(hash[0:2], make([]byte, 32))
|
|
||||||
assert.Empty(t, c.getCached())
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSBPC_checkErrorUpstream(t *testing.T) {
|
|
||||||
d, _ := newForTest(t, &Config{SafeBrowsingEnabled: true}, nil)
|
|
||||||
t.Cleanup(d.Close)
|
|
||||||
|
|
||||||
ups := aghtest.NewErrorUpstream()
|
|
||||||
d.SetSafeBrowsingUpstream(ups)
|
|
||||||
d.SetParentalUpstream(ups)
|
|
||||||
|
|
||||||
setts := &Settings{
|
|
||||||
ProtectionEnabled: true,
|
|
||||||
SafeBrowsingEnabled: true,
|
|
||||||
ParentalEnabled: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err := d.checkSafeBrowsing("smthng.com", dns.TypeA, setts)
|
|
||||||
assert.Error(t, err)
|
|
||||||
|
|
||||||
_, err = d.checkParental("smthng.com", dns.TypeA, setts)
|
|
||||||
assert.Error(t, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSBPC(t *testing.T) {
|
|
||||||
d, _ := newForTest(t, &Config{SafeBrowsingEnabled: true}, nil)
|
|
||||||
t.Cleanup(d.Close)
|
|
||||||
|
|
||||||
const hostname = "example.org"
|
|
||||||
|
|
||||||
setts := &Settings{
|
|
||||||
ProtectionEnabled: true,
|
|
||||||
SafeBrowsingEnabled: true,
|
|
||||||
ParentalEnabled: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
testCases := []struct {
|
|
||||||
testCache cache.Cache
|
|
||||||
testFunc func(host string, _ uint16, _ *Settings) (res Result, err error)
|
|
||||||
name string
|
|
||||||
block bool
|
|
||||||
}{{
|
|
||||||
testCache: d.safebrowsingCache,
|
|
||||||
testFunc: d.checkSafeBrowsing,
|
|
||||||
name: "sb_no_block",
|
|
||||||
block: false,
|
|
||||||
}, {
|
|
||||||
testCache: d.safebrowsingCache,
|
|
||||||
testFunc: d.checkSafeBrowsing,
|
|
||||||
name: "sb_block",
|
|
||||||
block: true,
|
|
||||||
}, {
|
|
||||||
testCache: d.parentalCache,
|
|
||||||
testFunc: d.checkParental,
|
|
||||||
name: "pc_no_block",
|
|
||||||
block: false,
|
|
||||||
}, {
|
|
||||||
testCache: d.parentalCache,
|
|
||||||
testFunc: d.checkParental,
|
|
||||||
name: "pc_block",
|
|
||||||
block: true,
|
|
||||||
}}
|
|
||||||
|
|
||||||
for _, tc := range testCases {
|
|
||||||
// Prepare the upstream.
|
|
||||||
ups := aghtest.NewBlockUpstream(hostname, tc.block)
|
|
||||||
|
|
||||||
var numReq int
|
|
||||||
onExchange := ups.OnExchange
|
|
||||||
ups.OnExchange = func(req *dns.Msg) (resp *dns.Msg, err error) {
|
|
||||||
numReq++
|
|
||||||
|
|
||||||
return onExchange(req)
|
|
||||||
}
|
|
||||||
|
|
||||||
d.SetSafeBrowsingUpstream(ups)
|
|
||||||
d.SetParentalUpstream(ups)
|
|
||||||
|
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
|
||||||
// Firstly, check the request blocking.
|
|
||||||
hits := 0
|
|
||||||
res, err := tc.testFunc(hostname, dns.TypeA, setts)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
if tc.block {
|
|
||||||
assert.True(t, res.IsFiltered)
|
|
||||||
require.Len(t, res.Rules, 1)
|
|
||||||
hits++
|
|
||||||
} else {
|
|
||||||
require.False(t, res.IsFiltered)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check the cache state, check the response is now cached.
|
|
||||||
assert.Equal(t, 1, tc.testCache.Stats().Count)
|
|
||||||
assert.Equal(t, hits, tc.testCache.Stats().Hit)
|
|
||||||
|
|
||||||
// There was one request to an upstream.
|
|
||||||
assert.Equal(t, 1, numReq)
|
|
||||||
|
|
||||||
// Now make the same request to check the cache was used.
|
|
||||||
res, err = tc.testFunc(hostname, dns.TypeA, setts)
|
|
||||||
require.NoError(t, err)
|
|
||||||
|
|
||||||
if tc.block {
|
|
||||||
assert.True(t, res.IsFiltered)
|
|
||||||
require.Len(t, res.Rules, 1)
|
|
||||||
} else {
|
|
||||||
require.False(t, res.IsFiltered)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check the cache state, it should've been used.
|
|
||||||
assert.Equal(t, 1, tc.testCache.Stats().Count)
|
|
||||||
assert.Equal(t, hits+1, tc.testCache.Stats().Hit)
|
|
||||||
|
|
||||||
// Check that there were no additional requests.
|
|
||||||
assert.Equal(t, 1, numReq)
|
|
||||||
})
|
|
||||||
|
|
||||||
purgeCaches(d)
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -12,6 +12,14 @@ type blockedService struct {
|
||||||
|
|
||||||
// blockedServices contains raw blocked service data.
|
// blockedServices contains raw blocked service data.
|
||||||
var blockedServices = []blockedService{{
|
var blockedServices = []blockedService{{
|
||||||
|
ID: "500px",
|
||||||
|
Name: "500px",
|
||||||
|
IconSVG: []byte("<svg xmlns=\"http://www.w3.org/2000/svg\" fill=\"currentColor\" viewBox=\"0 0 50 50\"><path d=\"M 5 14 L 2.5 26 L 6.800781 26 C 6.800781 26 7.699219 24.300781 10.199219 24.300781 C 12.699219 24.300781 14 26.199219 14 28.300781 C 14 30.402344 12.5 32.800781 10.199219 32.800781 C 7.898438 32.800781 6.5 30.398438 6.5 29 L 2 29 C 2 30.199219 3 36 10.199219 36 C 15.15625 36 17.417969 33.121094 18.015625 31.898438 C 19.386719 34.34375 21.992188 36 24.984375 36 C 27.253906 36 29.777344 34.808594 32.5 32.453125 C 35.222656 34.808594 37.746094 36 40.015625 36 C 44.417969 36 48 32.410156 48 28 C 48 23.589844 44.417969 20 40.015625 20 C 37.746094 20 35.222656 21.191406 32.5 23.546875 C 29.777344 21.191406 27.253906 20 24.984375 20 C 21.832031 20 19.105469 21.847656 17.8125 24.511719 C 17.113281 23.382813 15.414063 21 11.902344 21 C 8.101563 21 7.300781 22.597656 7.300781 22.597656 C 7.300781 22.597656 7.699219 21.300781 8.300781 18 L 17 18 L 17 14 Z M 24.984375 25 C 25.453125 25 26.800781 25.226563 29.230469 27.328125 L 30.011719 28 L 29.230469 28.671875 C 26.800781 30.773438 25.453125 31 24.984375 31 C 23.339844 31 22 29.652344 22 28 C 22 26.347656 23.339844 25 24.984375 25 Z M 40.015625 25 C 41.660156 25 43 26.347656 43 28 C 43 29.652344 41.660156 31 40.015625 31 C 39.546875 31 38.199219 30.773438 35.769531 28.671875 L 34.988281 28 L 35.769531 27.328125 C 38.199219 25.226563 39.546875 25 40.015625 25 Z\"/></svg>"),
|
||||||
|
Rules: []string{
|
||||||
|
"||500px.com^",
|
||||||
|
"||500px.org^",
|
||||||
|
},
|
||||||
|
}, {
|
||||||
ID: "9gag",
|
ID: "9gag",
|
||||||
Name: "9GAG",
|
Name: "9GAG",
|
||||||
IconSVG: []byte("<svg xmlns=\"http://www.w3.org/2000/svg\" fill=\"currentColor\" viewBox=\"0 0 50 50\"><path d=\"M 44 14 C 44 13.644531 43.8125 13.316406 43.507813 13.136719 C 40.453125 11.347656 28.46875 4.847656 25.535156 3.136719 C 25.222656 2.957031 24.839844 2.957031 24.527344 3.136719 C 21.128906 5.117188 10.089844 11.621094 7.496094 13.136719 C 7.1875 13.316406 7 13.644531 7 14 L 7 20 C 7 20.378906 7.214844 20.722656 7.550781 20.894531 C 7.660156 20.949219 18.597656 26.453125 24.5 29.867188 C 24.8125 30.046875 25.195313 30.046875 25.507813 29.863281 C 27.269531 28.828125 29.117188 27.859375 30.902344 26.921875 C 32.253906 26.214844 33.636719 25.488281 35.003906 24.722656 C 35.007813 26.820313 35.003906 29.296875 35 30.40625 L 25 35.859375 L 14.480469 30.121094 C 14.144531 29.9375 13.730469 29.964844 13.417969 30.1875 L 6.417969 35.1875 C 6.140625 35.386719 5.980469 35.714844 6.003906 36.054688 C 6.023438 36.398438 6.214844 36.707031 6.515625 36.871094 L 24.542969 46.871094 C 24.695313 46.957031 24.859375 47 25.027344 47 C 25.195313 47 25.363281 46.957031 25.515625 46.875 L 43.484375 36.875 C 43.804688 36.695313 44 36.363281 44 36 C 44 36 43.992188 21.011719 44 14 Z M 25 20 L 18 16 L 25 12 L 32 16 Z\" /></svg>"),
|
IconSVG: []byte("<svg xmlns=\"http://www.w3.org/2000/svg\" fill=\"currentColor\" viewBox=\"0 0 50 50\"><path d=\"M 44 14 C 44 13.644531 43.8125 13.316406 43.507813 13.136719 C 40.453125 11.347656 28.46875 4.847656 25.535156 3.136719 C 25.222656 2.957031 24.839844 2.957031 24.527344 3.136719 C 21.128906 5.117188 10.089844 11.621094 7.496094 13.136719 C 7.1875 13.316406 7 13.644531 7 14 L 7 20 C 7 20.378906 7.214844 20.722656 7.550781 20.894531 C 7.660156 20.949219 18.597656 26.453125 24.5 29.867188 C 24.8125 30.046875 25.195313 30.046875 25.507813 29.863281 C 27.269531 28.828125 29.117188 27.859375 30.902344 26.921875 C 32.253906 26.214844 33.636719 25.488281 35.003906 24.722656 C 35.007813 26.820313 35.003906 29.296875 35 30.40625 L 25 35.859375 L 14.480469 30.121094 C 14.144531 29.9375 13.730469 29.964844 13.417969 30.1875 L 6.417969 35.1875 C 6.140625 35.386719 5.980469 35.714844 6.003906 36.054688 C 6.023438 36.398438 6.214844 36.707031 6.515625 36.871094 L 24.542969 46.871094 C 24.695313 46.957031 24.859375 47 25.027344 47 C 25.195313 47 25.363281 46.957031 25.515625 46.875 L 43.484375 36.875 C 43.804688 36.695313 44 36.363281 44 36 C 44 36 43.992188 21.011719 44 14 Z M 25 20 L 18 16 L 25 12 L 32 16 Z\" /></svg>"),
|
||||||
|
@ -1180,6 +1188,18 @@ var blockedServices = []blockedService{{
|
||||||
"||zuckerberg.com^",
|
"||zuckerberg.com^",
|
||||||
"||zuckerberg.net^",
|
"||zuckerberg.net^",
|
||||||
},
|
},
|
||||||
|
}, {
|
||||||
|
ID: "flickr",
|
||||||
|
Name: "Flickr",
|
||||||
|
IconSVG: []byte("<svg xmlns=\"http://www.w3.org/2000/svg\" fill=\"currentColor\" viewBox=\"0 0 50 50\"><path d=\"M 9 4 C 6.2504839 4 4 6.2504839 4 9 L 4 41 C 4 43.749516 6.2504839 46 9 46 L 41 46 C 43.749516 46 46 43.749516 46 41 L 46 9 C 46 6.2504839 43.749516 4 41 4 L 9 4 z M 9 6 L 41 6 C 42.668484 6 44 7.3315161 44 9 L 44 41 C 44 42.668484 42.668484 44 41 44 L 9 44 C 7.3315161 44 6 42.668484 6 41 L 6 9 C 6 7.3315161 7.3315161 6 9 6 z M 16 17 C 11.59 17 8 20.59 8 25 C 8 29.41 11.59 33 16 33 C 20.41 33 24 29.41 24 25 C 24 20.59 20.41 17 16 17 z M 34 17 C 29.59 17 26 20.59 26 25 C 26 29.41 29.59 33 34 33 C 38.41 33 42 29.41 42 25 C 42 20.59 38.41 17 34 17 z\"/></svg>"),
|
||||||
|
Rules: []string{
|
||||||
|
"||flic.kr^",
|
||||||
|
"||flickr.com^",
|
||||||
|
"||flickr.net^",
|
||||||
|
"||flickrprints.com^",
|
||||||
|
"||flickrpro.com^",
|
||||||
|
"||staticflickr.com^",
|
||||||
|
},
|
||||||
}, {
|
}, {
|
||||||
ID: "gog",
|
ID: "gog",
|
||||||
Name: "GOG",
|
Name: "GOG",
|
||||||
|
@ -1325,6 +1345,13 @@ var blockedServices = []blockedService{{
|
||||||
"||kakao.com^",
|
"||kakao.com^",
|
||||||
"||kgslb.com^",
|
"||kgslb.com^",
|
||||||
},
|
},
|
||||||
|
}, {
|
||||||
|
ID: "kik",
|
||||||
|
Name: "Kik",
|
||||||
|
IconSVG: []byte("<svg xmlns=\"http://www.w3.org/2000/svg\" fill=\"currentColor\" viewBox=\"0 0 50 50\"><path d=\"M 3.5039062 12 C 1.9347705 11.994817 0.87857579 12.97636 0.4453125 13.849609 C 0.01204921 14.722858 0 15.564453 0 15.564453 A 1.0001 1.0001 0 0 0 0 15.59375 L 0 35 A 1.0001 1.0001 0 0 0 0.00390625 35.078125 C 0.00390625 35.078125 0.05696144 35.828363 0.5390625 36.554688 C 1.0211636 37.281011 2.0459252 38.004441 3.5019531 38.001953 C 4.8916439 38.000053 5.8837351 37.273604 6.3769531 36.578125 C 6.8701712 35.882646 6.9863281 35.166016 6.9863281 35.166016 A 1.0001 1.0001 0 0 0 7 35 L 7 31.802734 L 10.167969 36.554688 L 10.130859 36.494141 C 10.511831 37.164615 11.143097 37.525465 11.742188 37.730469 C 12.341278 37.935473 12.950104 38.001953 13.5 38.001953 C 15.411725 38.001953 17 36.431487 17 34.5 C 17 34.056649 16.90825 34.03442 16.851562 33.912109 C 16.794882 33.789799 16.730864 33.671331 16.654297 33.537109 C 16.501163 33.268666 16.298339 32.944015 16.058594 32.572266 C 15.579103 31.828767 14.950355 30.90254 14.322266 29.992188 C 13.310206 28.525308 12.655222 27.610988 12.300781 27.113281 L 14.707031 24.707031 A 1.0001 1.0001 0 0 0 14.738281 24.673828 C 14.738281 24.673828 15.354706 24.012223 15.748047 23.042969 C 16.141388 22.073714 16.298687 20.56089 15.259766 19.349609 C 14.281705 18.208994 12.842689 18.141009 11.925781 18.416016 C 11.008874 18.691022 10.371094 19.222656 10.371094 19.222656 A 1.0001 1.0001 0 0 0 10.292969 19.292969 L 6.9980469 22.587891 L 6.9921875 15.646484 A 1.0001 1.0001 0 0 0 6.9902344 15.580078 C 6.9902344 15.580078 6.9441634 14.743069 6.5058594 13.875 C 6.0675579 13.006938 5.0412971 12.005313 3.5039062 12 z M 30.503906 12 C 28.93477 11.9948 27.878577 12.97636 27.445312 13.849609 C 27.012049 14.722858 27 15.564453 27 15.564453 A 1.0001 1.0001 0 0 0 27 15.59375 L 27 35 A 1.0001 1.0001 0 0 0 27.003906 35.078125 C 27.003906 35.078125 27.056966 35.828363 27.539062 36.554688 C 28.021165 37.281011 29.045925 38.004441 30.501953 38.001953 C 31.891644 38.000053 32.883735 37.273604 33.376953 36.578125 C 33.870171 35.882646 33.986328 35.166016 33.986328 35.166016 A 1.0001 1.0001 0 0 0 34 35 L 34 31.802734 L 37.167969 36.554688 L 37.130859 36.494141 C 37.511831 37.164615 38.143096 37.525465 38.742188 37.730469 C 39.341277 37.935473 39.950104 38.001953 40.5 38.001953 C 42.411725 38.001953 44 36.431487 44 34.5 C 44 34.056649 43.908251 34.03442 43.851562 33.912109 C 43.794882 33.789799 43.730864 33.671331 43.654297 33.537109 C 43.501163 33.268666 43.298339 32.944015 43.058594 32.572266 C 42.579103 31.828767 41.950355 30.90254 41.322266 29.992188 C 40.310206 28.525308 39.655222 27.610988 39.300781 27.113281 L 41.707031 24.707031 A 1.0001 1.0001 0 0 0 41.738281 24.673828 C 41.738281 24.673828 42.354706 24.012223 42.748047 23.042969 C 43.141388 22.073714 43.298687 20.56089 42.259766 19.349609 C 41.281705 18.208994 39.842689 18.141009 38.925781 18.416016 C 38.008874 18.691022 37.371094 19.222656 37.371094 19.222656 A 1.0001 1.0001 0 0 0 37.292969 19.292969 L 33.998047 22.587891 L 33.992188 15.646484 A 1.0001 1.0001 0 0 0 33.990234 15.580078 C 33.990234 15.580078 33.944164 14.743069 33.505859 13.875 C 33.067647 13.006938 32.041297 12.005313 30.503906 12 z M 21.507812 18 C 19.85324 17.98686 18.785557 19.124468 18.382812 20.09375 C 18.181441 20.578391 18.090615 21.031738 18.044922 21.375 C 18.022072 21.546631 18.011459 21.69063 18.005859 21.796875 C 18.000252 21.90312 18 22.065333 18 21.984375 L 17.982422 34.998047 A 1.0001 1.0001 0 0 0 17.990234 35.134766 C 17.990234 
35.134766 18.085674 35.862804 18.576172 36.568359 C 19.06667 37.273915 20.071581 37.997467 21.486328 38 C 22.885358 38.0026 23.885897 37.278643 24.380859 36.580078 C 24.875822 35.881513 24.986328 35.160156 24.986328 35.160156 A 1.0001 1.0001 0 0 0 25 35 L 25 21.996094 C 25 21.996094 25.02572 21.084043 24.625 20.117188 C 24.224283 19.150332 23.164841 18.013078 21.507812 18 z M 46.5 24 C 44.578848 24 43 25.578848 43 27.5 C 43 29.421152 44.578848 31 46.5 31 C 48.421152 31 50 29.421152 50 27.5 C 50 25.578848 48.421152 24 46.5 24 z M 46.5 26 C 47.340272 26 48 26.659728 48 27.5 C 48 28.340272 47.340272 29 46.5 29 C 45.659728 29 45 28.340272 45 27.5 C 45 26.659728 45.659728 26 46.5 26 z\"/></svg>"),
|
||||||
|
Rules: []string{
|
||||||
|
"||kik.com^",
|
||||||
|
},
|
||||||
}, {
|
}, {
|
||||||
ID: "lazada",
|
ID: "lazada",
|
||||||
Name: "Lazada",
|
Name: "Lazada",
|
||||||
|
@ -1385,6 +1412,7 @@ var blockedServices = []blockedService{{
|
||||||
Rules: []string{
|
Rules: []string{
|
||||||
"||aus.social^",
|
"||aus.social^",
|
||||||
"||awscommunity.social^",
|
"||awscommunity.social^",
|
||||||
|
"||climatejustice.social^",
|
||||||
"||cyberplace.social^",
|
"||cyberplace.social^",
|
||||||
"||defcon.social^",
|
"||defcon.social^",
|
||||||
"||det.social^",
|
"||det.social^",
|
||||||
|
@ -1442,13 +1470,13 @@ var blockedServices = []blockedService{{
|
||||||
"||mstdn.plus^",
|
"||mstdn.plus^",
|
||||||
"||mstdn.social^",
|
"||mstdn.social^",
|
||||||
"||muenchen.social^",
|
"||muenchen.social^",
|
||||||
"||muenster.im^",
|
|
||||||
"||newsie.social^",
|
"||newsie.social^",
|
||||||
"||noc.social^",
|
"||noc.social^",
|
||||||
"||norden.social^",
|
"||norden.social^",
|
||||||
"||nrw.social^",
|
"||nrw.social^",
|
||||||
"||o3o.ca^",
|
"||o3o.ca^",
|
||||||
"||ohai.social^",
|
"||ohai.social^",
|
||||||
|
"||pewtix.com^",
|
||||||
"||piaille.fr^",
|
"||piaille.fr^",
|
||||||
"||pol.social^",
|
"||pol.social^",
|
||||||
"||ravenation.club^",
|
"||ravenation.club^",
|
||||||
|
@ -1480,7 +1508,6 @@ var blockedServices = []blockedService{{
|
||||||
"||union.place^",
|
"||union.place^",
|
||||||
"||universeodon.com^",
|
"||universeodon.com^",
|
||||||
"||urbanists.social^",
|
"||urbanists.social^",
|
||||||
"||wien.rocks^",
|
|
||||||
"||wxw.moe^",
|
"||wxw.moe^",
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
|
@ -1827,6 +1854,13 @@ var blockedServices = []blockedService{{
|
||||||
"||tx.me^",
|
"||tx.me^",
|
||||||
"||usercontent.dev^",
|
"||usercontent.dev^",
|
||||||
},
|
},
|
||||||
|
}, {
|
||||||
|
ID: "tidal",
|
||||||
|
Name: "Tidal",
|
||||||
|
IconSVG: []byte("<svg xmlns=\"http://www.w3.org/2000/svg\" fill=\"currentColor\" viewBox=\"0 0 50 50\"><path d=\"M 9 12 C 8.7615 12 8.5237969 12.091437 8.3417969 12.273438 L 1.2734375 19.341797 C 0.9094375 19.705797 0.9094375 20.294203 1.2734375 20.658203 L 8.3417969 27.726562 C 8.7057969 28.090563 9.2942031 28.090563 9.6582031 27.726562 L 16.726562 20.658203 C 16.908563 20.476203 17 20.2385 17 20 C 17 19.7615 16.908563 19.523797 16.726562 19.341797 L 9.6582031 12.273438 C 9.4762031 12.091437 9.2385 12 9 12 z M 17 20 C 17 20.2385 17.091438 20.476203 17.273438 20.658203 L 24.341797 27.726562 C 24.523797 27.908563 24.7615 28 25 28 C 25.2385 28 25.476203 27.908563 25.658203 27.726562 L 32.726562 20.658203 C 32.908563 20.476203 33 20.2385 33 20 C 33 19.7615 32.908563 19.523797 32.726562 19.341797 L 25.658203 12.273438 C 25.294203 11.909437 24.705797 11.909437 24.341797 12.273438 L 17.273438 19.341797 C 17.091437 19.523797 17 19.7615 17 20 z M 33 20 C 33 20.2385 33.091437 20.476203 33.273438 20.658203 L 40.341797 27.726562 C 40.705797 28.090563 41.294203 28.090563 41.658203 27.726562 L 48.726562 20.658203 C 49.090563 20.294203 49.090563 19.705797 48.726562 19.341797 L 41.658203 12.273438 C 41.294203 11.909437 40.705797 11.909437 40.341797 12.273438 L 33.273438 19.341797 C 33.091437 19.523797 33 19.7615 33 20 z M 25 28 C 24.7615 28 24.523797 28.091437 24.341797 28.273438 L 17.273438 35.341797 C 16.909437 35.705797 16.909437 36.294203 17.273438 36.658203 L 24.341797 43.726562 C 24.705797 44.090562 25.294203 44.090562 25.658203 43.726562 L 32.726562 36.658203 C 33.090563 36.294203 33.090563 35.705797 32.726562 35.341797 L 25.658203 28.273438 C 25.476203 28.091437 25.2385 28 25 28 z\"/></svg>"),
|
||||||
|
Rules: []string{
|
||||||
|
"||tidal.com^",
|
||||||
|
},
|
||||||
}, {
|
}, {
|
||||||
ID: "tiktok",
|
ID: "tiktok",
|
||||||
Name: "TikTok",
|
Name: "TikTok",
|
||||||
|
|
|
@@ -8,6 +8,7 @@ import (
    "github.com/AdguardTeam/AdGuardHome/internal/filtering"
    "github.com/AdguardTeam/AdGuardHome/internal/filtering/safesearch"
    "github.com/AdguardTeam/dnsproxy/proxy"
    "github.com/AdguardTeam/golibs/stringutil"
)

// Client contains information about persistent clients.
@@ -37,6 +38,19 @@ type Client struct {
    IgnoreStatistics bool
}

// ShallowClone returns a deep copy of the client, except upstreamConfig,
// safeSearchConf, SafeSearch fields, because it's difficult to copy them.
func (c *Client) ShallowClone() (sh *Client) {
    clone := *c

    clone.IDs = stringutil.CloneSlice(c.IDs)
    clone.Tags = stringutil.CloneSlice(c.Tags)
    clone.BlockedServices = stringutil.CloneSlice(c.BlockedServices)
    clone.Upstreams = stringutil.CloneSlice(c.Upstreams)

    return &clone
}

// closeUpstreams closes the client-specific upstream config of c if any.
func (c *Client) closeUpstreams() (err error) {
    if c.upstreamConfig != nil {
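ShallowClone copies the slice fields so the copy stops sharing backing arrays with the original. A small self-contained sketch of the same idea with a stand-in type (miniClient and shallowClone are illustrative names, not from the repository):

package main

import "fmt"

// miniClient stands in for the real Client type; only for illustration.
type miniClient struct {
    Name string
    IDs  []string
}

// shallowClone copies the struct and, like ShallowClone above, also copies the
// slice so the two values stop sharing a backing array.
func (c *miniClient) shallowClone() *miniClient {
    clone := *c
    clone.IDs = append([]string(nil), c.IDs...)

    return &clone
}

func main() {
    orig := &miniClient{Name: "laptop", IDs: []string{"192.168.1.10"}}
    cl := orig.shallowClone()
    cl.IDs[0] = "192.168.1.11"

    fmt.Println(orig.IDs[0]) // 192.168.1.10: the clone's edit did not leak back
}
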
@ -378,6 +378,7 @@ func (clients *clientsContainer) clientOrArtificial(
|
||||||
}, true
|
}, true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Find returns a shallow copy of the client if there is one found.
|
||||||
func (clients *clientsContainer) Find(id string) (c *Client, ok bool) {
|
func (clients *clientsContainer) Find(id string) (c *Client, ok bool) {
|
||||||
clients.lock.Lock()
|
clients.lock.Lock()
|
||||||
defer clients.lock.Unlock()
|
defer clients.lock.Unlock()
|
||||||
|
@ -387,20 +388,18 @@ func (clients *clientsContainer) Find(id string) (c *Client, ok bool) {
|
||||||
return nil, false
|
return nil, false
|
||||||
}
|
}
|
||||||
|
|
||||||
c.IDs = stringutil.CloneSlice(c.IDs)
|
return c.ShallowClone(), true
|
||||||
c.Tags = stringutil.CloneSlice(c.Tags)
|
|
||||||
c.BlockedServices = stringutil.CloneSlice(c.BlockedServices)
|
|
||||||
c.Upstreams = stringutil.CloneSlice(c.Upstreams)
|
|
||||||
|
|
||||||
return c, true
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// shouldCountClient is a wrapper around Find to make it a valid client
|
// shouldCountClient is a wrapper around Find to make it a valid client
|
||||||
// information finder for the statistics. If no information about the client
|
// information finder for the statistics. If no information about the client
|
||||||
// is found, it returns true.
|
// is found, it returns true.
|
||||||
func (clients *clientsContainer) shouldCountClient(ids []string) (y bool) {
|
func (clients *clientsContainer) shouldCountClient(ids []string) (y bool) {
|
||||||
|
clients.lock.Lock()
|
||||||
|
defer clients.lock.Unlock()
|
||||||
|
|
||||||
for _, id := range ids {
|
for _, id := range ids {
|
||||||
client, ok := clients.Find(id)
|
client, ok := clients.findLocked(id)
|
||||||
if ok {
|
if ok {
|
||||||
return !client.IgnoreStatistics
|
return !client.IgnoreStatistics
|
||||||
}
|
}
|
||||||
|
@ -617,6 +616,15 @@ func (clients *clientsContainer) Add(c *Client) (ok bool, err error) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
clients.add(c)
|
||||||
|
|
||||||
|
log.Debug("clients: added %q: ID:%q [%d]", c.Name, c.IDs, len(clients.list))
|
||||||
|
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// add c to the indexes. clients.lock is expected to be locked.
|
||||||
|
func (clients *clientsContainer) add(c *Client) {
|
||||||
// update Name index
|
// update Name index
|
||||||
clients.list[c.Name] = c
|
clients.list[c.Name] = c
|
||||||
|
|
||||||
|
@ -624,10 +632,6 @@ func (clients *clientsContainer) Add(c *Client) (ok bool, err error) {
|
||||||
for _, id := range c.IDs {
|
for _, id := range c.IDs {
|
||||||
clients.idIndex[id] = c
|
clients.idIndex[id] = c
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Debug("clients: added %q: ID:%q [%d]", c.Name, c.IDs, len(clients.list))
|
|
||||||
|
|
||||||
return true, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Del removes a client. ok is false if there is no such client.
|
// Del removes a client. ok is false if there is no such client.
|
||||||
|
@ -645,86 +649,53 @@ func (clients *clientsContainer) Del(name string) (ok bool) {
|
||||||
log.Error("client container: removing client %s: %s", name, err)
|
log.Error("client container: removing client %s: %s", name, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
clients.del(c)
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// del removes c from the indexes. clients.lock is expected to be locked.
|
||||||
|
func (clients *clientsContainer) del(c *Client) {
|
||||||
// update Name index
|
// update Name index
|
||||||
delete(clients.list, name)
|
delete(clients.list, c.Name)
|
||||||
|
|
||||||
// update ID index
|
// update ID index
|
||||||
for _, id := range c.IDs {
|
for _, id := range c.IDs {
|
||||||
delete(clients.idIndex, id)
|
delete(clients.idIndex, id)
|
||||||
}
|
}
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update updates a client by its name.
|
// Update updates a client by its name.
|
||||||
func (clients *clientsContainer) Update(name string, c *Client) (err error) {
|
func (clients *clientsContainer) Update(prev, c *Client) (err error) {
|
||||||
err = clients.check(c)
|
err = clients.check(c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
// Don't wrap the error since it's informative enough as is.
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
clients.lock.Lock()
|
clients.lock.Lock()
|
||||||
defer clients.lock.Unlock()
|
defer clients.lock.Unlock()
|
||||||
|
|
||||||
prev, ok := clients.list[name]
|
// Check the name index.
|
||||||
if !ok {
|
|
||||||
return errors.Error("client not found")
|
|
||||||
}
|
|
||||||
|
|
||||||
// First, check the name index.
|
|
||||||
if prev.Name != c.Name {
|
if prev.Name != c.Name {
|
||||||
_, ok = clients.list[c.Name]
|
_, ok := clients.list[c.Name]
|
||||||
if ok {
|
if ok {
|
||||||
return errors.Error("client already exists")
|
return errors.Error("client already exists")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Second, update the ID index.
|
// Check the ID index.
|
||||||
err = clients.updateIDIndex(prev, c.IDs)
|
if !slices.Equal(prev.IDs, c.IDs) {
|
||||||
if err != nil {
|
for _, id := range c.IDs {
|
||||||
// Don't wrap the error, because it's informative enough as is.
|
existing, ok := clients.idIndex[id]
|
||||||
return err
|
if ok && existing != prev {
|
||||||
}
|
return fmt.Errorf("id %q is used by client with name %q", id, existing.Name)
|
||||||
|
}
|
||||||
// Update name index.
|
|
||||||
if prev.Name != c.Name {
|
|
||||||
delete(clients.list, prev.Name)
|
|
||||||
clients.list[c.Name] = prev
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update upstreams cache.
|
|
||||||
err = c.closeUpstreams()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
*prev = *c
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// updateIDIndex updates the ID index data for cli using the information from
|
|
||||||
// newIDs.
|
|
||||||
func (clients *clientsContainer) updateIDIndex(cli *Client, newIDs []string) (err error) {
|
|
||||||
if slices.Equal(cli.IDs, newIDs) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, id := range newIDs {
|
|
||||||
existing, ok := clients.idIndex[id]
|
|
||||||
if ok && existing != cli {
|
|
||||||
return fmt.Errorf("id %q is used by client with name %q", id, existing.Name)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update the IDs in the index.
|
clients.del(prev)
|
||||||
for _, id := range cli.IDs {
|
clients.add(c)
|
||||||
delete(clients.idIndex, id)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, id := range newIDs {
|
|
||||||
clients.idIndex[id] = cli
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
@@ -98,22 +98,8 @@ func TestClients(t *testing.T) {
 		assert.False(t, ok)
 	})
 
-	t.Run("update_fail_name", func(t *testing.T) {
-		err := clients.Update("client3", &Client{
-			IDs:  []string{"1.2.3.0"},
-			Name: "client3",
-		})
-		require.Error(t, err)
-
-		err = clients.Update("client3", &Client{
-			IDs:  []string{"1.2.3.0"},
-			Name: "client2",
-		})
-		assert.Error(t, err)
-	})
-
 	t.Run("update_fail_ip", func(t *testing.T) {
-		err := clients.Update("client1", &Client{
+		err := clients.Update(&Client{Name: "client1"}, &Client{
 			IDs:  []string{"2.2.2.2"},
 			Name: "client1",
 		})
@@ -129,7 +115,10 @@ func TestClients(t *testing.T) {
 			cliNewIP = netip.MustParseAddr(cliNew)
 		)
 
-		err := clients.Update("client1", &Client{
+		prev, ok := clients.list["client1"]
+		require.True(t, ok)
+
+		err := clients.Update(prev, &Client{
 			IDs:  []string{cliNew},
 			Name: "client1",
 		})
@@ -138,7 +127,10 @@ func TestClients(t *testing.T) {
 		assert.Equal(t, clients.clientSource(cliOldIP), ClientSourceNone)
 		assert.Equal(t, clients.clientSource(cliNewIP), ClientSourcePersistent)
 
-		err = clients.Update("client1", &Client{
+		prev, ok = clients.list["client1"]
+		require.True(t, ok)
+
+		err = clients.Update(prev, &Client{
 			IDs:            []string{cliNew},
 			Name:           "client1-renamed",
 			UseOwnSettings: true,
@@ -289,7 +289,7 @@ func (clients *clientsContainer) handleUpdateClient(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	err = clients.Update(dj.Name, c)
+	err = clients.Update(prev, c)
 	if err != nil {
 		aghhttp.Error(r, w, http.StatusBadRequest, "%s", err)
@@ -399,19 +399,39 @@ func (c *configuration) getConfigFilename() string {
 	return configFile
 }
 
-// getLogSettings reads logging settings from the config file.
-// we do it in a separate method in order to configure logger before the actual configuration is parsed and applied.
-func getLogSettings() logSettings {
-	l := logSettings{}
+// readLogSettings reads logging settings from the config file. We do it in a
+// separate method in order to configure logger before the actual configuration
+// is parsed and applied.
+func readLogSettings() (ls *logSettings) {
+	ls = &logSettings{}
+
 	yamlFile, err := readConfigFile()
 	if err != nil {
-		return l
+		return ls
 	}
-	err = yaml.Unmarshal(yamlFile, &l)
+
+	err = yaml.Unmarshal(yamlFile, ls)
 	if err != nil {
 		log.Error("Couldn't get logging settings from the configuration: %s", err)
 	}
-	return l
+
+	return ls
+}
+
+// validateBindHosts returns error if any of binding hosts from configuration is
+// not a valid IP address.
+func validateBindHosts(conf *configuration) (err error) {
+	if !conf.BindHost.IsValid() {
+		return errors.Error("bind_host is not a valid ip address")
+	}
+
+	for i, addr := range conf.DNS.BindHosts {
+		if !addr.IsValid() {
+			return fmt.Errorf("dns.bind_hosts at index %d is not a valid ip address", i)
+		}
+	}
+
+	return nil
 }
 
 // parseConfig loads configuration from the YAML file
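Note on the validateBindHosts addition above: the check leans entirely on netip.Addr.IsValid. Below is a minimal standalone sketch of that property (illustrative only, not part of the change; the package main wrapper is mine): the zero netip.Addr, which is what a bind host that never parsed ends up as, reports false, while any successfully parsed address, including the unspecified one, reports true.

package main

import (
	"fmt"
	"net/netip"
)

func main() {
	// The zero value is what a bind host that was never parsed ends up as.
	var unset netip.Addr
	fmt.Println(unset.IsValid()) // false

	// Both the unspecified and a concrete address count as valid.
	fmt.Println(netip.MustParseAddr("::").IsValid())        // true
	fmt.Println(netip.MustParseAddr("127.0.0.1").IsValid()) // true
}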
@@ -425,6 +445,13 @@ func parseConfig() (err error) {
 	config.fileData = nil
 	err = yaml.Unmarshal(fileData, &config)
 	if err != nil {
+		// Don't wrap the error since it's informative enough as is.
+		return err
+	}
+
+	err = validateBindHosts(config)
+	if err != nil {
+		// Don't wrap the error since it's informative enough as is.
 		return err
 	}
@@ -180,7 +180,7 @@ func registerControlHandlers() {
 	httpRegister(http.MethodGet, "/control/status", handleStatus)
 	httpRegister(http.MethodPost, "/control/i18n/change_language", handleI18nChangeLanguage)
 	httpRegister(http.MethodGet, "/control/i18n/current_language", handleI18nCurrentLanguage)
-	Context.mux.HandleFunc("/control/version.json", postInstall(optionalAuth(handleGetVersionJSON)))
+	Context.mux.HandleFunc("/control/version.json", postInstall(optionalAuth(handleVersionJSON)))
 	httpRegister(http.MethodPost, "/control/update", handleUpdate)
 	httpRegister(http.MethodGet, "/control/profile", handleGetProfile)
 	httpRegister(http.MethodPut, "/control/profile/update", handlePutProfile)
@@ -26,15 +26,14 @@ type temporaryError interface {
 	Temporary() (ok bool)
 }
 
-// Get the latest available version from the Internet
-func handleGetVersionJSON(w http.ResponseWriter, r *http.Request) {
+// handleVersionJSON is the handler for the POST /control/version.json HTTP API.
+//
+// TODO(a.garipov): Find out if this API used with a GET method by anyone.
+func handleVersionJSON(w http.ResponseWriter, r *http.Request) {
 	resp := &versionResponse{}
 	if Context.disableUpdate {
 		resp.Disabled = true
-		err := json.NewEncoder(w).Encode(resp)
-		if err != nil {
-			aghhttp.Error(r, w, http.StatusInternalServerError, "writing body: %s", err)
-		}
+		_ = aghhttp.WriteJSONResponse(w, r, resp)
 
 		return
 	}
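The handler above now delegates to aghhttp.WriteJSONResponse, whose implementation is not part of this hunk. As a hedged illustration of what such a helper is assumed to do, and of the Content-Type detail the old json.NewEncoder-only path skipped (see the changelog note about /control/version.json), here is a small generic sketch; the writeJSON name and the use of httptest are mine, not the project's API.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/http/httptest"
)

// writeJSON sets the Content-Type header before encoding the body, which is
// the part a plain json.NewEncoder(w).Encode(resp) call does not do.
func writeJSON(w http.ResponseWriter, body any) error {
	w.Header().Set("Content-Type", "application/json")

	return json.NewEncoder(w).Encode(body)
}

func main() {
	rec := httptest.NewRecorder()
	_ = writeJSON(rec, map[string]bool{"disabled": true})

	fmt.Println(rec.Header().Get("Content-Type")) // application/json
	fmt.Print(rec.Body.String())                  // {"disabled":true}
}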
@@ -27,14 +27,17 @@ import (
 	"github.com/AdguardTeam/AdGuardHome/internal/dhcpd"
 	"github.com/AdguardTeam/AdGuardHome/internal/dnsforward"
 	"github.com/AdguardTeam/AdGuardHome/internal/filtering"
+	"github.com/AdguardTeam/AdGuardHome/internal/filtering/hashprefix"
 	"github.com/AdguardTeam/AdGuardHome/internal/filtering/safesearch"
 	"github.com/AdguardTeam/AdGuardHome/internal/querylog"
 	"github.com/AdguardTeam/AdGuardHome/internal/stats"
 	"github.com/AdguardTeam/AdGuardHome/internal/updater"
 	"github.com/AdguardTeam/AdGuardHome/internal/version"
+	"github.com/AdguardTeam/dnsproxy/upstream"
 	"github.com/AdguardTeam/golibs/errors"
 	"github.com/AdguardTeam/golibs/log"
 	"github.com/AdguardTeam/golibs/netutil"
+	"github.com/AdguardTeam/golibs/stringutil"
 	"golang.org/x/exp/slices"
 	"gopkg.in/natefinch/lumberjack.v2"
 )
@@ -143,7 +146,9 @@ func Main(clientBuildFS fs.FS) {
 	run(opts, clientBuildFS)
 }
 
-func setupContext(opts options) {
+// setupContext initializes [Context] fields. It also reads and upgrades
+// config file if necessary.
+func setupContext(opts options) (err error) {
 	setupContextFlags(opts)
 
 	Context.tlsRoots = aghtls.SystemRootCAs()
@ -160,10 +165,15 @@ func setupContext(opts options) {
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Context.mux = http.NewServeMux()
|
||||||
|
|
||||||
if !Context.firstRun {
|
if !Context.firstRun {
|
||||||
// Do the upgrade if necessary.
|
// Do the upgrade if necessary.
|
||||||
err := upgradeConfig()
|
err = upgradeConfig()
|
||||||
fatalOnError(err)
|
if err != nil {
|
||||||
|
// Don't wrap the error, because it's informative enough as is.
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
if err = parseConfig(); err != nil {
|
if err = parseConfig(); err != nil {
|
||||||
log.Error("parsing configuration file: %s", err)
|
log.Error("parsing configuration file: %s", err)
|
||||||
|
@ -179,11 +189,14 @@ func setupContext(opts options) {
|
||||||
|
|
||||||
if !opts.noEtcHosts && config.Clients.Sources.HostsFile {
|
if !opts.noEtcHosts && config.Clients.Sources.HostsFile {
|
||||||
err = setupHostsContainer()
|
err = setupHostsContainer()
|
||||||
fatalOnError(err)
|
if err != nil {
|
||||||
|
// Don't wrap the error, because it's informative enough as is.
|
||||||
|
return err
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Context.mux = http.NewServeMux()
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// setupContextFlags sets global flags and prints their status to the log.
|
// setupContextFlags sets global flags and prints their status to the log.
|
||||||
|
@ -285,25 +298,27 @@ func setupHostsContainer() (err error) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func setupConfig(opts options) (err error) {
|
// setupOpts sets up command-line options.
|
||||||
config.DNS.DnsfilterConf.EtcHosts = Context.etcHosts
|
func setupOpts(opts options) (err error) {
|
||||||
config.DNS.DnsfilterConf.ConfigModified = onConfigModified
|
err = setupBindOpts(opts)
|
||||||
config.DNS.DnsfilterConf.HTTPRegister = httpRegister
|
|
||||||
config.DNS.DnsfilterConf.DataDir = Context.getDataDir()
|
|
||||||
config.DNS.DnsfilterConf.Filters = slices.Clone(config.Filters)
|
|
||||||
config.DNS.DnsfilterConf.WhitelistFilters = slices.Clone(config.WhitelistFilters)
|
|
||||||
config.DNS.DnsfilterConf.UserRules = slices.Clone(config.UserRules)
|
|
||||||
config.DNS.DnsfilterConf.HTTPClient = Context.client
|
|
||||||
|
|
||||||
config.DNS.DnsfilterConf.SafeSearchConf.CustomResolver = safeSearchResolver{}
|
|
||||||
config.DNS.DnsfilterConf.SafeSearch, err = safesearch.NewDefault(
|
|
||||||
config.DNS.DnsfilterConf.SafeSearchConf,
|
|
||||||
"default",
|
|
||||||
config.DNS.DnsfilterConf.SafeSearchCacheSize,
|
|
||||||
time.Minute*time.Duration(config.DNS.DnsfilterConf.CacheTime),
|
|
||||||
)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("initializing safesearch: %w", err)
|
// Don't wrap the error, because it's informative enough as is.
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(opts.pidFile) != 0 && writePIDFile(opts.pidFile) {
|
||||||
|
Context.pidFileName = opts.pidFile
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// initContextClients initializes Context clients and related fields.
|
||||||
|
func initContextClients() (err error) {
|
||||||
|
err = setupDNSFilteringConf(config.DNS.DnsfilterConf)
|
||||||
|
if err != nil {
|
||||||
|
// Don't wrap the error, because it's informative enough as is.
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
//lint:ignore SA1019 Migration is not over.
|
//lint:ignore SA1019 Migration is not over.
|
||||||
|
@ -338,8 +353,19 @@ func setupConfig(opts options) (err error) {
|
||||||
arpdb = aghnet.NewARPDB()
|
arpdb = aghnet.NewARPDB()
|
||||||
}
|
}
|
||||||
|
|
||||||
Context.clients.Init(config.Clients.Persistent, Context.dhcpServer, Context.etcHosts, arpdb, config.DNS.DnsfilterConf)
|
Context.clients.Init(
|
||||||
|
config.Clients.Persistent,
|
||||||
|
Context.dhcpServer,
|
||||||
|
Context.etcHosts,
|
||||||
|
arpdb,
|
||||||
|
config.DNS.DnsfilterConf,
|
||||||
|
)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// setupBindOpts overrides bind host/port from the opts.
|
||||||
|
func setupBindOpts(opts options) (err error) {
|
||||||
if opts.bindPort != 0 {
|
if opts.bindPort != 0 {
|
||||||
config.BindPort = opts.bindPort
|
config.BindPort = opts.bindPort
|
||||||
|
|
||||||
|
@ -350,12 +376,83 @@ func setupConfig(opts options) (err error) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// override bind host/port from the console
|
|
||||||
if opts.bindHost.IsValid() {
|
if opts.bindHost.IsValid() {
|
||||||
config.BindHost = opts.bindHost
|
config.BindHost = opts.bindHost
|
||||||
}
|
}
|
||||||
if len(opts.pidFile) != 0 && writePIDFile(opts.pidFile) {
|
|
||||||
Context.pidFileName = opts.pidFile
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// setupDNSFilteringConf sets up DNS filtering configuration settings.
|
||||||
|
func setupDNSFilteringConf(conf *filtering.Config) (err error) {
|
||||||
|
const (
|
||||||
|
dnsTimeout = 3 * time.Second
|
||||||
|
|
||||||
|
sbService = "safe browsing"
|
||||||
|
defaultSafeBrowsingServer = `https://family.adguard-dns.com/dns-query`
|
||||||
|
sbTXTSuffix = `sb.dns.adguard.com.`
|
||||||
|
|
||||||
|
pcService = "parental control"
|
||||||
|
defaultParentalServer = `https://family.adguard-dns.com/dns-query`
|
||||||
|
pcTXTSuffix = `pc.dns.adguard.com.`
|
||||||
|
)
|
||||||
|
|
||||||
|
conf.EtcHosts = Context.etcHosts
|
||||||
|
conf.ConfigModified = onConfigModified
|
||||||
|
conf.HTTPRegister = httpRegister
|
||||||
|
conf.DataDir = Context.getDataDir()
|
||||||
|
conf.Filters = slices.Clone(config.Filters)
|
||||||
|
conf.WhitelistFilters = slices.Clone(config.WhitelistFilters)
|
||||||
|
conf.UserRules = slices.Clone(config.UserRules)
|
||||||
|
conf.HTTPClient = Context.client
|
||||||
|
|
||||||
|
cacheTime := time.Duration(conf.CacheTime) * time.Minute
|
||||||
|
|
||||||
|
upsOpts := &upstream.Options{
|
||||||
|
Timeout: dnsTimeout,
|
||||||
|
ServerIPAddrs: []net.IP{
|
||||||
|
{94, 140, 14, 15},
|
||||||
|
{94, 140, 15, 16},
|
||||||
|
net.ParseIP("2a10:50c0::bad1:ff"),
|
||||||
|
net.ParseIP("2a10:50c0::bad2:ff"),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
sbUps, err := upstream.AddressToUpstream(defaultSafeBrowsingServer, upsOpts)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("converting safe browsing server: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
conf.SafeBrowsingChecker = hashprefix.New(&hashprefix.Config{
|
||||||
|
Upstream: sbUps,
|
||||||
|
ServiceName: sbService,
|
||||||
|
TXTSuffix: sbTXTSuffix,
|
||||||
|
CacheTime: cacheTime,
|
||||||
|
CacheSize: conf.SafeBrowsingCacheSize,
|
||||||
|
})
|
||||||
|
|
||||||
|
parUps, err := upstream.AddressToUpstream(defaultParentalServer, upsOpts)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("converting parental server: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
conf.ParentalControlChecker = hashprefix.New(&hashprefix.Config{
|
||||||
|
Upstream: parUps,
|
||||||
|
ServiceName: pcService,
|
||||||
|
TXTSuffix: pcTXTSuffix,
|
||||||
|
CacheTime: cacheTime,
|
||||||
|
CacheSize: conf.SafeBrowsingCacheSize,
|
||||||
|
})
|
||||||
|
|
||||||
|
conf.SafeSearchConf.CustomResolver = safeSearchResolver{}
|
||||||
|
conf.SafeSearch, err = safesearch.NewDefault(
|
||||||
|
conf.SafeSearchConf,
|
||||||
|
"default",
|
||||||
|
conf.SafeSearchCacheSize,
|
||||||
|
cacheTime,
|
||||||
|
)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("initializing safesearch: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
@ -432,14 +529,16 @@ func fatalOnError(err error) {
|
||||||
|
|
||||||
// run configures and starts AdGuard Home.
|
// run configures and starts AdGuard Home.
|
||||||
func run(opts options, clientBuildFS fs.FS) {
|
func run(opts options, clientBuildFS fs.FS) {
|
||||||
// configure config filename
|
// Configure config filename.
|
||||||
initConfigFilename(opts)
|
initConfigFilename(opts)
|
||||||
|
|
||||||
// configure working dir and config path
|
// Configure working dir and config path.
|
||||||
initWorkingDir(opts)
|
err := initWorkingDir(opts)
|
||||||
|
fatalOnError(err)
|
||||||
|
|
||||||
// configure log level and output
|
// Configure log level and output.
|
||||||
configureLogger(opts)
|
err = configureLogger(opts)
|
||||||
|
fatalOnError(err)
|
||||||
|
|
||||||
// Print the first message after logger is configured.
|
// Print the first message after logger is configured.
|
||||||
log.Info(version.Full())
|
log.Info(version.Full())
|
||||||
|
@ -448,25 +547,29 @@ func run(opts options, clientBuildFS fs.FS) {
|
||||||
log.Info("AdGuard Home is running as a service")
|
log.Info("AdGuard Home is running as a service")
|
||||||
}
|
}
|
||||||
|
|
||||||
setupContext(opts)
|
err = setupContext(opts)
|
||||||
|
|
||||||
err := configureOS(config)
|
|
||||||
fatalOnError(err)
|
fatalOnError(err)
|
||||||
|
|
||||||
// clients package uses filtering package's static data (filtering.BlockedSvcKnown()),
|
err = configureOS(config)
|
||||||
// so we have to initialize filtering's static data first,
|
fatalOnError(err)
|
||||||
// but also avoid relying on automatic Go init() function
|
|
||||||
|
// Clients package uses filtering package's static data
|
||||||
|
// (filtering.BlockedSvcKnown()), so we have to initialize filtering static
|
||||||
|
// data first, but also to avoid relying on automatic Go init() function.
|
||||||
filtering.InitModule()
|
filtering.InitModule()
|
||||||
|
|
||||||
err = setupConfig(opts)
|
err = initContextClients()
|
||||||
fatalOnError(err)
|
fatalOnError(err)
|
||||||
|
|
||||||
// TODO(e.burkov): This could be made earlier, probably as the option's
|
err = setupOpts(opts)
|
||||||
|
fatalOnError(err)
|
||||||
|
|
||||||
|
// TODO(e.burkov): This could be made earlier, probably as the option's
|
||||||
// effect.
|
// effect.
|
||||||
cmdlineUpdate(opts)
|
cmdlineUpdate(opts)
|
||||||
|
|
||||||
if !Context.firstRun {
|
if !Context.firstRun {
|
||||||
// Save the updated config
|
// Save the updated config.
|
||||||
err = config.write()
|
err = config.write()
|
||||||
fatalOnError(err)
|
fatalOnError(err)
|
||||||
|
|
||||||
|
@ -476,33 +579,15 @@ func run(opts options, clientBuildFS fs.FS) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
err = os.MkdirAll(Context.getDataDir(), 0o755)
|
dir := Context.getDataDir()
|
||||||
if err != nil {
|
err = os.MkdirAll(dir, 0o755)
|
||||||
log.Fatalf("Cannot create DNS data dir at %s: %s", Context.getDataDir(), err)
|
fatalOnError(errors.Annotate(err, "creating DNS data dir at %s: %w", dir))
|
||||||
}
|
|
||||||
|
|
||||||
sessFilename := filepath.Join(Context.getDataDir(), "sessions.db")
|
|
||||||
GLMode = opts.glinetMode
|
GLMode = opts.glinetMode
|
||||||
var rateLimiter *authRateLimiter
|
|
||||||
if config.AuthAttempts > 0 && config.AuthBlockMin > 0 {
|
|
||||||
rateLimiter = newAuthRateLimiter(
|
|
||||||
time.Duration(config.AuthBlockMin)*time.Minute,
|
|
||||||
config.AuthAttempts,
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
log.Info("authratelimiter is disabled")
|
|
||||||
}
|
|
||||||
|
|
||||||
Context.auth = InitAuth(
|
// Init auth module.
|
||||||
sessFilename,
|
Context.auth, err = initUsers()
|
||||||
config.Users,
|
fatalOnError(err)
|
||||||
config.WebSessionTTLHours*60*60,
|
|
||||||
rateLimiter,
|
|
||||||
)
|
|
||||||
if Context.auth == nil {
|
|
||||||
log.Fatalf("Couldn't initialize Auth module")
|
|
||||||
}
|
|
||||||
config.Users = nil
|
|
||||||
|
|
||||||
Context.tls, err = newTLSManager(config.TLS)
|
Context.tls, err = newTLSManager(config.TLS)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
@@ -520,10 +605,10 @@ func run(opts options, clientBuildFS fs.FS) {
 	Context.tls.start()
 
 	go func() {
-		serr := startDNSServer()
-		if serr != nil {
+		sErr := startDNSServer()
+		if sErr != nil {
 			closeDNSServer()
-			fatalOnError(serr)
+			fatalOnError(sErr)
 		}
 	}()
||||||
|
|
||||||
|
@ -537,10 +622,33 @@ func run(opts options, clientBuildFS fs.FS) {
|
||||||
|
|
||||||
Context.web.start()
|
Context.web.start()
|
||||||
|
|
||||||
// wait indefinitely for other go-routines to complete their job
|
// Wait indefinitely for other goroutines to complete their job.
|
||||||
select {}
|
select {}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// initUsers initializes context auth module. Clears config users field.
|
||||||
|
func initUsers() (auth *Auth, err error) {
|
||||||
|
sessFilename := filepath.Join(Context.getDataDir(), "sessions.db")
|
||||||
|
|
||||||
|
var rateLimiter *authRateLimiter
|
||||||
|
if config.AuthAttempts > 0 && config.AuthBlockMin > 0 {
|
||||||
|
blockDur := time.Duration(config.AuthBlockMin) * time.Minute
|
||||||
|
rateLimiter = newAuthRateLimiter(blockDur, config.AuthAttempts)
|
||||||
|
} else {
|
||||||
|
log.Info("authratelimiter is disabled")
|
||||||
|
}
|
||||||
|
|
||||||
|
sessionTTL := config.WebSessionTTLHours * 60 * 60
|
||||||
|
auth = InitAuth(sessFilename, config.Users, sessionTTL, rateLimiter)
|
||||||
|
if auth == nil {
|
||||||
|
return nil, errors.Error("initializing auth module failed")
|
||||||
|
}
|
||||||
|
|
||||||
|
config.Users = nil
|
||||||
|
|
||||||
|
return auth, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (c *configuration) anonymizer() (ipmut *aghnet.IPMut) {
|
func (c *configuration) anonymizer() (ipmut *aghnet.IPMut) {
|
||||||
var anonFunc aghnet.IPMutFunc
|
var anonFunc aghnet.IPMutFunc
|
||||||
if c.DNS.AnonymizeClientIP {
|
if c.DNS.AnonymizeClientIP {
|
||||||
|
@ -613,22 +721,19 @@ func writePIDFile(fn string) bool {
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// initConfigFilename sets up context config file path. This file path can be
|
||||||
|
// overridden by command-line arguments, or is set to default.
|
||||||
func initConfigFilename(opts options) {
|
func initConfigFilename(opts options) {
|
||||||
// config file path can be overridden by command-line arguments:
|
Context.configFilename = stringutil.Coalesce(opts.confFilename, "AdGuardHome.yaml")
|
||||||
if opts.confFilename != "" {
|
|
||||||
Context.configFilename = opts.confFilename
|
|
||||||
} else {
|
|
||||||
// Default config file name
|
|
||||||
Context.configFilename = "AdGuardHome.yaml"
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// initWorkingDir initializes the workDir
|
// initWorkingDir initializes the workDir. If no command-line arguments are
|
||||||
// if no command-line arguments specified, we use the directory where our binary file is located
|
// specified, the directory with the binary file is used.
|
||||||
func initWorkingDir(opts options) {
|
func initWorkingDir(opts options) (err error) {
|
||||||
execPath, err := os.Executable()
|
execPath, err := os.Executable()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
// Don't wrap the error, because it's informative enough as is.
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if opts.workDir != "" {
|
if opts.workDir != "" {
|
||||||
|
@ -640,34 +745,20 @@ func initWorkingDir(opts options) {
|
||||||
|
|
||||||
workDir, err := filepath.EvalSymlinks(Context.workDir)
|
workDir, err := filepath.EvalSymlinks(Context.workDir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
panic(err)
|
// Don't wrap the error, because it's informative enough as is.
|
||||||
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
Context.workDir = workDir
|
Context.workDir = workDir
|
||||||
|
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// configureLogger configures logger level and output
|
// configureLogger configures logger level and output.
|
||||||
func configureLogger(opts options) {
|
func configureLogger(opts options) (err error) {
|
||||||
ls := getLogSettings()
|
ls := getLogSettings(opts)
|
||||||
|
|
||||||
// command-line arguments can override config settings
|
// Configure logger level.
|
||||||
if opts.verbose || config.Verbose {
|
|
||||||
ls.Verbose = true
|
|
||||||
}
|
|
||||||
if opts.logFile != "" {
|
|
||||||
ls.File = opts.logFile
|
|
||||||
} else if config.File != "" {
|
|
||||||
ls.File = config.File
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle default log settings overrides
|
|
||||||
ls.Compress = config.Compress
|
|
||||||
ls.LocalTime = config.LocalTime
|
|
||||||
ls.MaxBackups = config.MaxBackups
|
|
||||||
ls.MaxSize = config.MaxSize
|
|
||||||
ls.MaxAge = config.MaxAge
|
|
||||||
|
|
||||||
// log.SetLevel(log.INFO) - default
|
|
||||||
if ls.Verbose {
|
if ls.Verbose {
|
||||||
log.SetLevel(log.DEBUG)
|
log.SetLevel(log.DEBUG)
|
||||||
}
|
}
|
||||||
|
@ -676,38 +767,63 @@ func configureLogger(opts options) {
|
||||||
// happen pretty quickly.
|
// happen pretty quickly.
|
||||||
log.SetFlags(log.LstdFlags | log.Lmicroseconds)
|
log.SetFlags(log.LstdFlags | log.Lmicroseconds)
|
||||||
|
|
||||||
if opts.runningAsService && ls.File == "" && runtime.GOOS == "windows" {
|
// Write logs to stdout by default.
|
||||||
// When running as a Windows service, use eventlog by default if nothing
|
|
||||||
// else is configured. Otherwise, we'll simply lose the log output.
|
|
||||||
ls.File = configSyslog
|
|
||||||
}
|
|
||||||
|
|
||||||
// logs are written to stdout (default)
|
|
||||||
if ls.File == "" {
|
if ls.File == "" {
|
||||||
return
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if ls.File == configSyslog {
|
if ls.File == configSyslog {
|
||||||
// Use syslog where it is possible and eventlog on Windows
|
// Use syslog where it is possible and eventlog on Windows.
|
||||||
err := aghos.ConfigureSyslog(serviceName)
|
err = aghos.ConfigureSyslog(serviceName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Fatalf("cannot initialize syslog: %s", err)
|
return fmt.Errorf("cannot initialize syslog: %w", err)
|
||||||
}
|
|
||||||
} else {
|
|
||||||
logFilePath := ls.File
|
|
||||||
if !filepath.IsAbs(logFilePath) {
|
|
||||||
logFilePath = filepath.Join(Context.workDir, logFilePath)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
log.SetOutput(&lumberjack.Logger{
|
return nil
|
||||||
Filename: logFilePath,
|
|
||||||
Compress: ls.Compress, // disabled by default
|
|
||||||
LocalTime: ls.LocalTime,
|
|
||||||
MaxBackups: ls.MaxBackups,
|
|
||||||
MaxSize: ls.MaxSize, // megabytes
|
|
||||||
MaxAge: ls.MaxAge, // days
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
logFilePath := ls.File
|
||||||
|
if !filepath.IsAbs(logFilePath) {
|
||||||
|
logFilePath = filepath.Join(Context.workDir, logFilePath)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.SetOutput(&lumberjack.Logger{
|
||||||
|
Filename: logFilePath,
|
||||||
|
Compress: ls.Compress,
|
||||||
|
LocalTime: ls.LocalTime,
|
||||||
|
MaxBackups: ls.MaxBackups,
|
||||||
|
MaxSize: ls.MaxSize,
|
||||||
|
MaxAge: ls.MaxAge,
|
||||||
|
})
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// getLogSettings returns a log settings object properly initialized from opts.
|
||||||
|
func getLogSettings(opts options) (ls *logSettings) {
|
||||||
|
ls = readLogSettings()
|
||||||
|
|
||||||
|
// Command-line arguments can override config settings.
|
||||||
|
if opts.verbose || config.Verbose {
|
||||||
|
ls.Verbose = true
|
||||||
|
}
|
||||||
|
|
||||||
|
ls.File = stringutil.Coalesce(opts.logFile, config.File, ls.File)
|
||||||
|
|
||||||
|
// Handle default log settings overrides.
|
||||||
|
ls.Compress = config.Compress
|
||||||
|
ls.LocalTime = config.LocalTime
|
||||||
|
ls.MaxBackups = config.MaxBackups
|
||||||
|
ls.MaxSize = config.MaxSize
|
||||||
|
ls.MaxAge = config.MaxAge
|
||||||
|
|
||||||
|
if opts.runningAsService && ls.File == "" && runtime.GOOS == "windows" {
|
||||||
|
// When running as a Windows service, use eventlog by default if
|
||||||
|
// nothing else is configured. Otherwise, we'll lose the log output.
|
||||||
|
ls.File = configSyslog
|
||||||
|
}
|
||||||
|
|
||||||
|
return ls
|
||||||
}
|
}
|
||||||
|
|
||||||
// cleanup stops and resets all the modules.
|
// cleanup stops and resets all the modules.
|
||||||
|
|
|
@@ -4,7 +4,6 @@ import (
 	"fmt"
 	"io/fs"
 	"os"
-	"path/filepath"
 	"runtime"
 	"strconv"
 	"strings"
@@ -84,14 +83,9 @@ func svcStatus(s service.Service) (status service.Status, err error) {
 // On OpenWrt, the service utility may not exist. We use our service script
 // directly in this case.
 func svcAction(s service.Service, action string) (err error) {
-	if runtime.GOOS == "darwin" && action == "start" {
-		var exe string
-		if exe, err = os.Executable(); err != nil {
-			log.Error("starting service: getting executable path: %s", err)
-		} else if exe, err = filepath.EvalSymlinks(exe); err != nil {
-			log.Error("starting service: evaluating executable symlinks: %s", err)
-		} else if !strings.HasPrefix(exe, "/Applications/") {
-			log.Info("warning: service must be started from within the /Applications directory")
+	if action == "start" {
+		if err = aghos.PreCheckActionStart(); err != nil {
+			log.Error("starting service: %s", err)
 		}
 	}
 
@@ -99,8 +93,6 @@ func svcAction(s service.Service, action string) (err error) {
 	if err != nil && service.Platform() == "unix-systemv" &&
 		(action == "start" || action == "stop" || action == "restart") {
 		_, err = runInitdCommand(action)
-
-		return err
 	}
 
 	return err
|
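The macOS-specific start check removed from svcAction above now lives behind aghos.PreCheckActionStart, whose body is not shown in this diff. The following is a hedged reconstruction of what the darwin variant of such a check could look like, based only on the removed lines; the lower-case function name, the error wrapping, and the assumption that the per-OS split happens elsewhere (for example via build tags) are mine.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// preCheckActionStart mirrors the logic removed from svcAction: on macOS the
// binary is expected to live under /Applications before the service starts.
func preCheckActionStart() (err error) {
	exe, err := os.Executable()
	if err != nil {
		return fmt.Errorf("getting executable path: %w", err)
	}

	exe, err = filepath.EvalSymlinks(exe)
	if err != nil {
		return fmt.Errorf("evaluating executable symlinks: %w", err)
	}

	if !strings.HasPrefix(exe, "/Applications/") {
		return fmt.Errorf("service must be started from within the /Applications directory")
	}

	return nil
}

func main() {
	if err := preCheckActionStart(); err != nil {
		fmt.Println("warning:", err)
	}
}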
@ -224,6 +216,7 @@ func handleServiceControlAction(opts options, clientBuildFS fs.FS) {
|
||||||
|
|
||||||
runOpts := opts
|
runOpts := opts
|
||||||
runOpts.serviceControlAction = "run"
|
runOpts.serviceControlAction = "run"
|
||||||
|
|
||||||
svcConfig := &service.Config{
|
svcConfig := &service.Config{
|
||||||
Name: serviceName,
|
Name: serviceName,
|
||||||
DisplayName: serviceDisplayName,
|
DisplayName: serviceDisplayName,
|
||||||
|
@ -233,35 +226,48 @@ func handleServiceControlAction(opts options, clientBuildFS fs.FS) {
|
||||||
}
|
}
|
||||||
configureService(svcConfig)
|
configureService(svcConfig)
|
||||||
|
|
||||||
prg := &program{
|
s, err := service.New(&program{clientBuildFS: clientBuildFS, opts: runOpts}, svcConfig)
|
||||||
clientBuildFS: clientBuildFS,
|
if err != nil {
|
||||||
opts: runOpts,
|
|
||||||
}
|
|
||||||
var s service.Service
|
|
||||||
if s, err = service.New(prg, svcConfig); err != nil {
|
|
||||||
log.Fatalf("service: initializing service: %s", err)
|
log.Fatalf("service: initializing service: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
err = handleServiceCommand(s, action, opts)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("service: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Printf(
|
||||||
|
"service: action %s has been done successfully on %s",
|
||||||
|
action,
|
||||||
|
service.ChosenSystem(),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// handleServiceCommand handles service command.
|
||||||
|
func handleServiceCommand(s service.Service, action string, opts options) (err error) {
|
||||||
switch action {
|
switch action {
|
||||||
case "status":
|
case "status":
|
||||||
handleServiceStatusCommand(s)
|
handleServiceStatusCommand(s)
|
||||||
case "run":
|
case "run":
|
||||||
if err = s.Run(); err != nil {
|
if err = s.Run(); err != nil {
|
||||||
log.Fatalf("service: failed to run service: %s", err)
|
return fmt.Errorf("failed to run service: %w", err)
|
||||||
}
|
}
|
||||||
case "install":
|
case "install":
|
||||||
initConfigFilename(opts)
|
initConfigFilename(opts)
|
||||||
initWorkingDir(opts)
|
if err = initWorkingDir(opts); err != nil {
|
||||||
|
return fmt.Errorf("failed to init working dir: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
handleServiceInstallCommand(s)
|
handleServiceInstallCommand(s)
|
||||||
case "uninstall":
|
case "uninstall":
|
||||||
handleServiceUninstallCommand(s)
|
handleServiceUninstallCommand(s)
|
||||||
default:
|
default:
|
||||||
if err = svcAction(s, action); err != nil {
|
if err = svcAction(s, action); err != nil {
|
||||||
log.Fatalf("service: executing action %q: %s", action, err)
|
return fmt.Errorf("executing action %q: %w", action, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Printf("service: action %s has been done successfully on %s", action, service.ChosenSystem())
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// handleServiceStatusCommand handles service "status" command.
|
// handleServiceStatusCommand handles service "status" command.
|
||||||
|
|
|
@@ -172,9 +172,32 @@ func loadTLSConf(tlsConf *tlsConfigSettings, status *tlsConfigStatus) (err error) {
 		}
 	}()
 
-	tlsConf.CertificateChainData = []byte(tlsConf.CertificateChain)
-	tlsConf.PrivateKeyData = []byte(tlsConf.PrivateKey)
+	err = loadCertificateChainData(tlsConf, status)
+	if err != nil {
+		// Don't wrap the error, because it's informative enough as is.
+		return err
+	}
+
+	err = loadPrivateKeyData(tlsConf, status)
+	if err != nil {
+		// Don't wrap the error, because it's informative enough as is.
+		return err
+	}
+
+	err = validateCertificates(
+		status,
+		tlsConf.CertificateChainData,
+		tlsConf.PrivateKeyData,
+		tlsConf.ServerName,
+	)
+
+	return errors.Annotate(err, "validating certificate pair: %w")
+}
+
+// loadCertificateChainData loads PEM-encoded certificates chain data to the
+// TLS configuration.
+func loadCertificateChainData(tlsConf *tlsConfigSettings, status *tlsConfigStatus) (err error) {
+	tlsConf.CertificateChainData = []byte(tlsConf.CertificateChain)
 	if tlsConf.CertificatePath != "" {
 		if tlsConf.CertificateChain != "" {
 			return errors.Error("certificate data and file can't be set together")
@@ -190,6 +213,13 @@ func loadTLSConf(tlsConf *tlsConfigSettings, status *tlsConfigStatus) (err error) {
 		status.ValidCert = true
 	}
 
+	return nil
+}
+
+// loadPrivateKeyData loads PEM-encoded private key data to the TLS
+// configuration.
+func loadPrivateKeyData(tlsConf *tlsConfigSettings, status *tlsConfigStatus) (err error) {
+	tlsConf.PrivateKeyData = []byte(tlsConf.PrivateKey)
 	if tlsConf.PrivateKeyPath != "" {
 		if tlsConf.PrivateKey != "" {
 			return errors.Error("private key data and file can't be set together")
@@ -203,16 +233,6 @@ func loadTLSConf(tlsConf *tlsConfigSettings, status *tlsConfigStatus) (err error) {
 		status.ValidKey = true
 	}
 
-	err = validateCertificates(
-		status,
-		tlsConf.CertificateChainData,
-		tlsConf.PrivateKeyData,
-		tlsConf.ServerName,
-	)
-	if err != nil {
-		return fmt.Errorf("validating certificate pair: %w", err)
-	}
-
 	return nil
 }
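loadTLSConf above now funnels everything into validateCertificates, which is not shown in this hunk. As a generic illustration only (not the project's implementation), the core of such a check can be expressed with the standard library: the PEM-encoded chain and key must both parse, and the private key must actually match the leaf certificate.

package main

import (
	"crypto/tls"
	"fmt"
)

// checkPair is a generic stand-in for the kind of validation a helper like
// validateCertificates performs on a certificate chain and private key pair.
func checkPair(chainPEM, keyPEM []byte) error {
	_, err := tls.X509KeyPair(chainPEM, keyPEM)
	if err != nil {
		return fmt.Errorf("validating certificate pair: %w", err)
	}

	return nil
}

func main() {
	// Empty input fails, demonstrating the error path.
	fmt.Println(checkPair(nil, nil))
}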
@@ -41,7 +41,8 @@ func upgradeConfig() error {
 	err = yaml.Unmarshal(body, &diskConf)
 	if err != nil {
-		log.Printf("Couldn't parse config file: %s", err)
+		log.Printf("parsing config file for upgrade: %s", err)
+
 		return err
 	}
@@ -293,71 +294,61 @@ func upgradeSchema4to5(diskConf yobj) error {
 	return nil
 }
 
-// clients:
-// ...
+// upgradeSchema5to6 performs the following changes:
 //
-//	ip: 127.0.0.1
-//	mac: ...
+//	# BEFORE:
+//	'clients':
+//	...
+//	'ip': 127.0.0.1
+//	'mac': ...
 //
-// ->
-//
-// clients:
-// ...
-//
-//	ids:
-//	- 127.0.0.1
-//	- ...
+//	# AFTER:
+//	'clients':
+//	...
+//	'ids':
+//	- 127.0.0.1
+//	- ...
 func upgradeSchema5to6(diskConf yobj) error {
-	log.Printf("%s(): called", funcName())
+	log.Printf("Upgrade yaml: 5 to 6")
 
 	diskConf["schema_version"] = 6
 
-	clients, ok := diskConf["clients"]
+	clientsVal, ok := diskConf["clients"]
 	if !ok {
 		return nil
 	}
 
-	switch arr := clients.(type) {
-	case []any:
-		for i := range arr {
-			switch c := arr[i].(type) {
-			case map[any]any:
-				var ipVal any
-				ipVal, ok = c["ip"]
-				ids := []string{}
-				if ok {
-					var ip string
-					ip, ok = ipVal.(string)
-					if !ok {
-						log.Fatalf("client.ip is not a string: %v", ipVal)
-						return nil
-					}
-					if len(ip) != 0 {
-						ids = append(ids, ip)
-					}
-				}
-
-				var macVal any
-				macVal, ok = c["mac"]
-				if ok {
-					var mac string
-					mac, ok = macVal.(string)
-					if !ok {
-						log.Fatalf("client.mac is not a string: %v", macVal)
-						return nil
-					}
-					if len(mac) != 0 {
-						ids = append(ids, mac)
-					}
-				}
-
-				c["ids"] = ids
-			default:
-				continue
-			}
-		}
-	default:
-		return nil
+	clients, ok := clientsVal.([]yobj)
+	if !ok {
+		return fmt.Errorf("unexpected type of clients: %T", clientsVal)
+	}
+
+	for i := range clients {
+		c := clients[i]
+		var ids []string
+
+		if ipVal, hasIP := c["ip"]; hasIP {
+			var ip string
+			if ip, ok = ipVal.(string); !ok {
+				return fmt.Errorf("client.ip is not a string: %v", ipVal)
+			}
+
+			if ip != "" {
+				ids = append(ids, ip)
+			}
+		}
+
+		if macVal, hasMac := c["mac"]; hasMac {
+			var mac string
+			if mac, ok = macVal.(string); !ok {
+				return fmt.Errorf("client.mac is not a string: %v", macVal)
+			}
+
+			if mac != "" {
+				ids = append(ids, mac)
+			}
+		}
+
+		c["ids"] = ids
 	}
 
 	return nil
@ -68,6 +68,95 @@ func TestUpgradeSchema2to3(t *testing.T) {
|
||||||
assertEqualExcept(t, oldDiskConf, diskConf, excludedEntries, excludedEntries)
|
assertEqualExcept(t, oldDiskConf, diskConf, excludedEntries, excludedEntries)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestUpgradeSchema5to6(t *testing.T) {
|
||||||
|
const newSchemaVer = 6
|
||||||
|
|
||||||
|
testCases := []struct {
|
||||||
|
in yobj
|
||||||
|
want yobj
|
||||||
|
wantErr string
|
||||||
|
name string
|
||||||
|
}{{
|
||||||
|
in: yobj{
|
||||||
|
"clients": []yobj{},
|
||||||
|
},
|
||||||
|
want: yobj{
|
||||||
|
"clients": []yobj{},
|
||||||
|
"schema_version": newSchemaVer,
|
||||||
|
},
|
||||||
|
wantErr: "",
|
||||||
|
name: "no_clients",
|
||||||
|
}, {
|
||||||
|
in: yobj{
|
||||||
|
"clients": []yobj{{"ip": "127.0.0.1"}},
|
||||||
|
},
|
||||||
|
want: yobj{
|
||||||
|
"clients": []yobj{{
|
||||||
|
"ids": []string{"127.0.0.1"},
|
||||||
|
"ip": "127.0.0.1",
|
||||||
|
}},
|
||||||
|
"schema_version": newSchemaVer,
|
||||||
|
},
|
||||||
|
wantErr: "",
|
||||||
|
name: "client_ip",
|
||||||
|
}, {
|
||||||
|
in: yobj{
|
||||||
|
"clients": []yobj{{"mac": "mac"}},
|
||||||
|
},
|
||||||
|
want: yobj{
|
||||||
|
"clients": []yobj{{
|
||||||
|
"ids": []string{"mac"},
|
||||||
|
"mac": "mac",
|
||||||
|
}},
|
||||||
|
"schema_version": newSchemaVer,
|
||||||
|
},
|
||||||
|
wantErr: "",
|
||||||
|
name: "client_mac",
|
||||||
|
}, {
|
||||||
|
in: yobj{
|
||||||
|
"clients": []yobj{{"ip": "127.0.0.1", "mac": "mac"}},
|
||||||
|
},
|
||||||
|
want: yobj{
|
||||||
|
"clients": []yobj{{
|
||||||
|
"ids": []string{"127.0.0.1", "mac"},
|
||||||
|
"ip": "127.0.0.1",
|
||||||
|
"mac": "mac",
|
||||||
|
}},
|
||||||
|
"schema_version": newSchemaVer,
|
||||||
|
},
|
||||||
|
wantErr: "",
|
||||||
|
name: "client_ip_mac",
|
||||||
|
}, {
|
||||||
|
in: yobj{
|
||||||
|
"clients": []yobj{{"ip": 1, "mac": "mac"}},
|
||||||
|
},
|
||||||
|
want: yobj{
|
||||||
|
"clients": []yobj{{"ip": 1, "mac": "mac"}},
|
||||||
|
"schema_version": newSchemaVer,
|
||||||
|
},
|
||||||
|
wantErr: "client.ip is not a string: 1",
|
||||||
|
name: "inv_client_ip",
|
||||||
|
}, {
|
||||||
|
in: yobj{
|
||||||
|
"clients": []yobj{{"ip": "127.0.0.1", "mac": 1}},
|
||||||
|
},
|
||||||
|
want: yobj{
|
||||||
|
"clients": []yobj{{"ip": "127.0.0.1", "mac": 1}},
|
||||||
|
"schema_version": newSchemaVer,
|
||||||
|
},
|
||||||
|
wantErr: "client.mac is not a string: 1",
|
||||||
|
name: "inv_client_mac",
|
||||||
|
}}
|
||||||
|
|
||||||
|
for _, tc := range testCases {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
err := upgradeSchema5to6(tc.in)
|
||||||
|
testutil.AssertErrorMsg(t, tc.wantErr, err)
|
||||||
|
assert.Equal(t, tc.want, tc.in)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestUpgradeSchema7to8(t *testing.T) {
|
func TestUpgradeSchema7to8(t *testing.T) {
|
||||||
const host = "1.2.3.4"
|
const host = "1.2.3.4"
|
||||||
oldConf := yobj{
|
oldConf := yobj{
|
||||||
|
|
|
@ -3,19 +3,24 @@ package querylog
|
||||||
import (
|
import (
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"net"
|
"net"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/AdguardTeam/AdGuardHome/internal/filtering"
|
"github.com/AdguardTeam/AdGuardHome/internal/filtering"
|
||||||
|
"github.com/AdguardTeam/golibs/errors"
|
||||||
"github.com/AdguardTeam/golibs/log"
|
"github.com/AdguardTeam/golibs/log"
|
||||||
"github.com/AdguardTeam/urlfilter/rules"
|
"github.com/AdguardTeam/urlfilter/rules"
|
||||||
"github.com/miekg/dns"
|
"github.com/miekg/dns"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// logEntryHandler represents a handler for decoding json token to the logEntry
|
||||||
|
// struct.
|
||||||
type logEntryHandler func(t json.Token, ent *logEntry) error
|
type logEntryHandler func(t json.Token, ent *logEntry) error
|
||||||
|
|
||||||
|
// logEntryHandlers is the map of log entry decode handlers for various keys.
|
||||||
var logEntryHandlers = map[string]logEntryHandler{
|
var logEntryHandlers = map[string]logEntryHandler{
|
||||||
"CID": func(t json.Token, ent *logEntry) error {
|
"CID": func(t json.Token, ent *logEntry) error {
|
||||||
v, ok := t.(string)
|
v, ok := t.(string)
|
||||||
|
@ -166,6 +171,7 @@ var logEntryHandlers = map[string]logEntryHandler{
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// decodeResultRuleKey decodes the token of "Rules" type to logEntry struct.
|
||||||
func decodeResultRuleKey(key string, i int, dec *json.Decoder, ent *logEntry) {
|
func decodeResultRuleKey(key string, i int, dec *json.Decoder, ent *logEntry) {
|
||||||
var vToken json.Token
|
var vToken json.Token
|
||||||
switch key {
|
switch key {
|
||||||
|
@ -189,6 +195,8 @@ func decodeResultRuleKey(key string, i int, dec *json.Decoder, ent *logEntry) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// decodeVTokenAndAddRule decodes the "Rules" toke as [filtering.ResultRule]
|
||||||
|
// and then adds the decoded object to the slice of result rules.
|
||||||
func decodeVTokenAndAddRule(
|
func decodeVTokenAndAddRule(
|
||||||
key string,
|
key string,
|
||||||
i int,
|
i int,
|
||||||
|
@ -213,6 +221,8 @@ func decodeVTokenAndAddRule(
|
||||||
return newRules, vToken
|
return newRules, vToken
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// decodeResultRules parses the dec's tokens into logEntry ent interpreting it
|
||||||
|
// as a slice of the result rules.
|
||||||
func decodeResultRules(dec *json.Decoder, ent *logEntry) {
|
func decodeResultRules(dec *json.Decoder, ent *logEntry) {
|
||||||
for {
|
for {
|
||||||
delimToken, err := dec.Token()
|
delimToken, err := dec.Token()
|
||||||
|
@ -224,48 +234,53 @@ func decodeResultRules(dec *json.Decoder, ent *logEntry) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if d, ok := delimToken.(json.Delim); ok {
|
if d, ok := delimToken.(json.Delim); !ok {
|
||||||
if d != '[' {
|
return
|
||||||
log.Debug("decodeResultRules: unexpected delim %q", d)
|
} else if d != '[' {
|
||||||
|
log.Debug("decodeResultRules: unexpected delim %q", d)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = decodeResultRuleToken(dec, ent)
|
||||||
|
if err != nil {
|
||||||
|
if err != io.EOF && !errors.Is(err, ErrEndOfToken) {
|
||||||
|
log.Debug("decodeResultRules err: %s", err)
|
||||||
}
|
}
|
||||||
} else {
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
i := 0
|
// decodeResultRuleToken decodes the tokens of "Rules" type to the logEntry ent.
|
||||||
for {
|
func decodeResultRuleToken(dec *json.Decoder, ent *logEntry) (err error) {
|
||||||
var keyToken json.Token
|
i := 0
|
||||||
keyToken, err = dec.Token()
|
for {
|
||||||
if err != nil {
|
var keyToken json.Token
|
||||||
if err != io.EOF {
|
keyToken, err = dec.Token()
|
||||||
log.Debug("decodeResultRules err: %s", err)
|
if err != nil {
|
||||||
}
|
// Don't wrap the error, because it's informative enough as is.
|
||||||
|
return err
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if d, ok := keyToken.(json.Delim); ok {
|
|
||||||
switch d {
|
|
||||||
case '}':
|
|
||||||
i++
|
|
||||||
case ']':
|
|
||||||
return
|
|
||||||
default:
|
|
||||||
// Go on.
|
|
||||||
}
|
|
||||||
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
key, ok := keyToken.(string)
|
|
||||||
if !ok {
|
|
||||||
log.Debug("decodeResultRules: keyToken is %T (%[1]v) and not string", keyToken)
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
decodeResultRuleKey(key, i, dec, ent)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if d, ok := keyToken.(json.Delim); ok {
|
||||||
|
switch d {
|
||||||
|
case '}':
|
||||||
|
i++
|
||||||
|
case ']':
|
||||||
|
return ErrEndOfToken
|
||||||
|
default:
|
||||||
|
// Go on.
|
||||||
|
}
|
||||||
|
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
key, ok := keyToken.(string)
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("keyToken is %T (%[1]v) and not string", keyToken)
|
||||||
|
}
|
||||||
|
|
||||||
|
decodeResultRuleKey(key, i, dec, ent)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -322,6 +337,8 @@ func decodeResultReverseHosts(dec *json.Decoder, ent *logEntry) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// decodeResultIPList parses the dec's tokens into logEntry ent interpreting it
|
||||||
|
// as the result IP addresses list.
|
||||||
func decodeResultIPList(dec *json.Decoder, ent *logEntry) {
|
func decodeResultIPList(dec *json.Decoder, ent *logEntry) {
|
||||||
for {
|
for {
|
||||||
itemToken, err := dec.Token()
|
itemToken, err := dec.Token()
|
||||||
|
@ -355,6 +372,8 @@ func decodeResultIPList(dec *json.Decoder, ent *logEntry) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// decodeResultDNSRewriteResultKey decodes the token of "DNSRewriteResult" type
|
||||||
|
// to the logEntry struct.
|
||||||
func decodeResultDNSRewriteResultKey(key string, dec *json.Decoder, ent *logEntry) {
|
func decodeResultDNSRewriteResultKey(key string, dec *json.Decoder, ent *logEntry) {
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
|
@ -395,50 +414,29 @@ func decodeResultDNSRewriteResultKey(key string, dec *json.Decoder, ent *logEntr
|
||||||
log.Debug("decodeResultDNSRewriteResultKey response err: %s", err)
|
log.Debug("decodeResultDNSRewriteResultKey response err: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
for rrType, rrValues := range ent.Result.DNSRewriteResult.Response {
|
ent.parseDNSRewriteResultIPs()
|
||||||
switch rrType {
|
|
||||||
case
|
|
||||||
dns.TypeA,
|
|
||||||
dns.TypeAAAA:
|
|
||||||
for i, v := range rrValues {
|
|
||||||
s, _ := v.(string)
|
|
||||||
rrValues[i] = net.ParseIP(s)
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
// Go on.
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default:
|
default:
|
||||||
// Go on.
|
// Go on.
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// decodeResultDNSRewriteResult parses the dec's tokens into logEntry ent
|
||||||
|
// interpreting it as the result DNSRewriteResult.
|
||||||
func decodeResultDNSRewriteResult(dec *json.Decoder, ent *logEntry) {
|
func decodeResultDNSRewriteResult(dec *json.Decoder, ent *logEntry) {
|
||||||
for {
|
for {
|
||||||
keyToken, err := dec.Token()
|
key, err := parseKeyToken(dec)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err != io.EOF {
|
if err != io.EOF && !errors.Is(err, ErrEndOfToken) {
|
||||||
log.Debug("decodeResultDNSRewriteResult err: %s", err)
|
log.Debug("decodeResultDNSRewriteResult: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if d, ok := keyToken.(json.Delim); ok {
|
if key == "" {
|
||||||
if d == '}' {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
key, ok := keyToken.(string)
|
|
||||||
if !ok {
|
|
||||||
log.Debug("decodeResultDNSRewriteResult: keyToken is %T (%[1]v) and not string", keyToken)
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
decodeResultDNSRewriteResultKey(key, dec, ent)
|
decodeResultDNSRewriteResultKey(key, dec, ent)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -474,34 +472,51 @@ func translateResult(ent *logEntry) {
|
||||||
res.IPList = nil
|
res.IPList = nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ErrEndOfToken is an error returned by parse key token when the closing
|
||||||
|
// bracket is found.
|
||||||
|
const ErrEndOfToken errors.Error = "end of token"
|
||||||
|
|
||||||
|
// parseKeyToken parses the dec's token key.
|
||||||
|
func parseKeyToken(dec *json.Decoder) (key string, err error) {
|
||||||
|
keyToken, err := dec.Token()
|
||||||
|
if err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
if d, ok := keyToken.(json.Delim); ok {
|
||||||
|
if d == '}' {
|
||||||
|
return "", ErrEndOfToken
|
||||||
|
}
|
||||||
|
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
key, ok := keyToken.(string)
|
||||||
|
if !ok {
|
||||||
|
return "", fmt.Errorf("keyToken is %T (%[1]v) and not string", keyToken)
|
||||||
|
}
|
||||||
|
|
||||||
|
return key, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// decodeResult decodes a token of "Result" type to logEntry struct.
|
||||||
func decodeResult(dec *json.Decoder, ent *logEntry) {
|
func decodeResult(dec *json.Decoder, ent *logEntry) {
|
||||||
defer translateResult(ent)
|
defer translateResult(ent)
|
||||||
|
|
||||||
for {
|
for {
|
||||||
keyToken, err := dec.Token()
|
key, err := parseKeyToken(dec)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if err != io.EOF {
|
if err != io.EOF && !errors.Is(err, ErrEndOfToken) {
|
||||||
log.Debug("decodeResult err: %s", err)
|
log.Debug("decodeResult: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if d, ok := keyToken.(json.Delim); ok {
|
if key == "" {
|
||||||
if d == '}' {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
key, ok := keyToken.(string)
|
|
||||||
if !ok {
|
|
||||||
log.Debug("decodeResult: keyToken is %T (%[1]v) and not string", keyToken)
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
decHandler, ok := resultDecHandlers[key]
|
decHandler, ok := resultDecHandlers[key]
|
||||||
if ok {
|
if ok {
|
||||||
decHandler(dec, ent)
|
decHandler(dec, ent)
|
||||||
|
@ -527,13 +542,16 @@ func decodeResult(dec *json.Decoder, ent *logEntry) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// resultHandlers is the map of log entry decode handlers for various keys.
|
||||||
var resultHandlers = map[string]logEntryHandler{
|
var resultHandlers = map[string]logEntryHandler{
|
||||||
"IsFiltered": func(t json.Token, ent *logEntry) error {
|
"IsFiltered": func(t json.Token, ent *logEntry) error {
|
||||||
v, ok := t.(bool)
|
v, ok := t.(bool)
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
ent.Result.IsFiltered = v
|
ent.Result.IsFiltered = v
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
},
|
},
|
||||||
"Rule": func(t json.Token, ent *logEntry) error {
|
"Rule": func(t json.Token, ent *logEntry) error {
|
||||||
|
@ -578,11 +596,14 @@ var resultHandlers = map[string]logEntryHandler{
|
||||||
if !ok {
|
if !ok {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
i, err := v.Int64()
|
i, err := v.Int64()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
ent.Result.Reason = filtering.Reason(i)
|
ent.Result.Reason = filtering.Reason(i)
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
},
|
},
|
||||||
"ServiceName": func(t json.Token, ent *logEntry) error {
|
"ServiceName": func(t json.Token, ent *logEntry) error {
|
||||||
|
@ -607,6 +628,7 @@ var resultHandlers = map[string]logEntryHandler{
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// resultDecHandlers is the map of decode handlers for various keys.
|
||||||
var resultDecHandlers = map[string]func(dec *json.Decoder, ent *logEntry){
|
var resultDecHandlers = map[string]func(dec *json.Decoder, ent *logEntry){
|
||||||
"ReverseHosts": decodeResultReverseHosts,
|
"ReverseHosts": decodeResultReverseHosts,
|
||||||
"IPList": decodeResultIPList,
|
"IPList": decodeResultIPList,
|
||||||
|
@ -614,9 +636,11 @@ var resultDecHandlers = map[string]func(dec *json.Decoder, ent *logEntry){
|
||||||
"DNSRewriteResult": decodeResultDNSRewriteResult,
|
"DNSRewriteResult": decodeResultDNSRewriteResult,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// decodeLogEntry decodes string str to logEntry ent.
|
||||||
func decodeLogEntry(ent *logEntry, str string) {
|
func decodeLogEntry(ent *logEntry, str string) {
|
||||||
dec := json.NewDecoder(strings.NewReader(str))
|
dec := json.NewDecoder(strings.NewReader(str))
|
||||||
dec.UseNumber()
|
dec.UseNumber()
|
||||||
|
|
||||||
for {
|
for {
|
||||||
keyToken, err := dec.Token()
|
keyToken, err := dec.Token()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
@@ -182,8 +182,7 @@ func TestDecodeLogEntry(t *testing.T) {
 		if tc.want == "" {
 			assert.Empty(t, s)
 		} else {
-			assert.True(t, strings.HasSuffix(s, tc.want),
-				"got %q", s)
+			assert.True(t, strings.HasSuffix(s, tc.want), "got %q", s)
 		}
 
 		logOutput.Reset()
@@ -68,3 +68,19 @@ func (e *logEntry) addResponse(resp *dns.Msg, isOrig bool) {
 		log.Error("querylog: %s", err)
 	}
 }
+
+// parseDNSRewriteResultIPs fills logEntry's DNSRewriteResult response records
+// with the IP addresses parsed from the raw strings.
+func (e *logEntry) parseDNSRewriteResultIPs() {
+	for rrType, rrValues := range e.Result.DNSRewriteResult.Response {
+		switch rrType {
+		case dns.TypeA, dns.TypeAAAA:
+			for i, v := range rrValues {
+				s, _ := v.(string)
+				rrValues[i] = net.ParseIP(s)
+			}
+		default:
+			// Go on.
+		}
+	}
+}
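A small aside on parseDNSRewriteResultIPs above: the loose `s, _ := v.(string)` assertion is tolerable because net.ParseIP returns nil for anything that is not a literal IP, so a missing or non-string value simply becomes a nil record rather than an error. A tiny standalone snippet (illustrative only, not part of the change) shows that behaviour:

package main

import (
	"fmt"
	"net"
)

func main() {
	// net.ParseIP returns nil for anything that is not a literal IP,
	// including the empty string produced by a failed type assertion.
	fmt.Println(net.ParseIP("127.0.0.1")) // 127.0.0.1
	fmt.Println(net.ParseIP(""))          // <nil>
}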
@@ -16,32 +16,35 @@ import (
 	"github.com/miekg/dns"
 )
 
-const (
-	queryLogFileName = "querylog.json" // .gz added during compression
-)
+// queryLogFileName is a name of the log file. ".gz" extension is added later
+// during compression.
+const queryLogFileName = "querylog.json"
 
-// queryLog is a structure that writes and reads the DNS query log
+// queryLog is a structure that writes and reads the DNS query log.
 type queryLog struct {
-	findClient func(ids []string) (c *Client, err error)
-
 	// confMu protects conf.
 	confMu *sync.RWMutex
-	conf   *Config
+
+	conf       *Config
+	anonymizer *aghnet.IPMut
+
+	findClient func(ids []string) (c *Client, err error)
 
 	// logFile is the path to the log file.
 	logFile string
 
-	// bufferLock protects buffer.
-	bufferLock sync.RWMutex
 	// buffer contains recent log entries. The entries in this buffer must not
 	// be modified.
 	buffer []*logEntry
 
-	fileFlushLock sync.Mutex // synchronize a file-flushing goroutine and main thread
-	flushPending  bool       // don't start another goroutine while the previous one is still running
+	// bufferLock protects buffer.
+	bufferLock sync.RWMutex
+
+	// fileFlushLock synchronizes a file-flushing goroutine and main thread.
+	fileFlushLock sync.Mutex
 	fileWriteLock sync.Mutex
 
-	anonymizer *aghnet.IPMut
+	flushPending bool
 }
 
 // ClientProto values are names of the client protocols.
@@ -155,6 +158,43 @@ func (l *queryLog) clear() {
    log.Debug("querylog: cleared")
 }

+// newLogEntry creates an instance of logEntry from parameters.
+func newLogEntry(params *AddParams) (entry *logEntry) {
+   q := params.Question.Question[0]
+
+   entry = &logEntry{
+       // TODO(d.kolyshev): Export this timestamp to func params.
+       Time: time.Now(),
+
+       QHost:  strings.ToLower(q.Name[:len(q.Name)-1]),
+       QType:  dns.Type(q.Qtype).String(),
+       QClass: dns.Class(q.Qclass).String(),
+
+       ClientID:    params.ClientID,
+       ClientProto: params.ClientProto,
+
+       Result:   *params.Result,
+       Upstream: params.Upstream,
+
+       IP: params.ClientIP,
+
+       Elapsed: params.Elapsed,
+
+       Cached:            params.Cached,
+       AuthenticatedData: params.AuthenticatedData,
+   }
+
+   if params.ReqECS != nil {
+       entry.ReqECS = params.ReqECS.String()
+   }
+
+   entry.addResponse(params.Answer, false)
+   entry.addResponse(params.OrigAnswer, true)
+
+   return entry
+}
+
+// Add implements the [QueryLog] interface for *queryLog.
 func (l *queryLog) Add(params *AddParams) {
    var isEnabled, fileIsEnabled bool
    var memSize uint32
@@ -181,35 +221,7 @@ func (l *queryLog) Add(params *AddParams) {
        params.Result = &filtering.Result{}
    }

-   now := time.Now()
-   q := params.Question.Question[0]
-   entry := &logEntry{
-       Time: now,
-
-       QHost:  strings.ToLower(q.Name[:len(q.Name)-1]),
-       QType:  dns.Type(q.Qtype).String(),
-       QClass: dns.Class(q.Qclass).String(),
-
-       ClientID:    params.ClientID,
-       ClientProto: params.ClientProto,
-
-       Result:   *params.Result,
-       Upstream: params.Upstream,
-
-       IP: params.ClientIP,
-
-       Elapsed: params.Elapsed,
-
-       Cached:            params.Cached,
-       AuthenticatedData: params.AuthenticatedData,
-   }
-
-   if params.ReqECS != nil {
-       entry.ReqECS = params.ReqECS.String()
-   }
-
-   entry.addResponse(params.Answer, false)
-   entry.addResponse(params.OrigAnswer, true)
+   entry := newLogEntry(params)

    needFlush := false
    func() {
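Note: newLogEntry, shown above, derives QHost by lowercasing the question name and dropping the trailing root dot. A small hedged sketch of that derivation with miekg/dns; hostFromQuestion is an illustrative name, and strings.TrimSuffix is used here for safety instead of the slicing the original relies on:

package main

import (
    "fmt"
    "strings"

    "github.com/miekg/dns"
)

// hostFromQuestion lowercases the query name and strips the trailing root
// dot, mirroring how the extracted constructor derives the logged host.
func hostFromQuestion(q dns.Question) string {
    return strings.ToLower(strings.TrimSuffix(q.Name, "."))
}

func main() {
    req := new(dns.Msg)
    req.SetQuestion("Example.ORG.", dns.TypeA)
    fmt.Println(hostFromQuestion(req.Question[0])) // example.org
}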
@@ -6,7 +6,6 @@ import (
    "testing"

    "github.com/AdguardTeam/AdGuardHome/internal/filtering"
-   "github.com/AdguardTeam/dnsproxy/proxyutil"
    "github.com/AdguardTeam/golibs/stringutil"
    "github.com/AdguardTeam/golibs/testutil"
    "github.com/AdguardTeam/golibs/timeutil"
@@ -46,9 +45,10 @@ func TestQueryLog(t *testing.T) {
    addEntry(l, "example.com", net.IPv4(1, 1, 1, 4), net.IPv4(2, 2, 2, 4))

    type tcAssertion struct {
-       num            int
-       host           string
-       answer, client net.IP
+       host   string
+       answer net.IP
+       client net.IP
+       num    int
    }

    testCases := []struct {
@@ -367,6 +367,6 @@ func assertLogEntry(t *testing.T, entry *logEntry, host string, answer, client n
    require.NoError(t, msg.Unpack(entry.Answer))
    require.Len(t, msg.Answer, 1)

-   ip := proxyutil.IPFromRR(msg.Answer[0]).To16()
-   assert.Equal(t, answer, ip)
+   a := testutil.RequireTypeAssert[*dns.A](t, msg.Answer[0])
+   assert.Equal(t, answer, a.A.To16())
 }
@@ -12,141 +12,181 @@ import (
    "github.com/AdguardTeam/golibs/log"
 )

-// Timestamp not found errors.
 const (
-   ErrTSNotFound errors.Error = "ts not found"
-   ErrTSTooLate  errors.Error = "ts too late"
-   ErrTSTooEarly errors.Error = "ts too early"
-)
+   // Timestamp not found errors.
+   errTSNotFound errors.Error = "ts not found"
+   errTSTooLate  errors.Error = "ts too late"
+   errTSTooEarly errors.Error = "ts too early"

-// TODO: Find a way to grow buffer instead of relying on this value when reading strings
-const maxEntrySize = 16 * 1024
+   // maxEntrySize is a maximum size of the entry.
+   //
+   // TODO: Find a way to grow buffer instead of relying on this value when
+   // reading strings.
+   maxEntrySize = 16 * 1024

-// buffer should be enough for at least this number of entries
-const bufferSize = 100 * maxEntrySize
+   // bufferSize should be enough for at least this number of entries.
+   bufferSize = 100 * maxEntrySize
+)

-// QLogFile represents a single query log file
-// It allows reading from the file in the reverse order
+// qLogFile represents a single query log file. It allows reading from the
+// file in the reverse order.
 //
-// Please note that this is a stateful object.
-// Internally, it contains a pointer to a specific position in the file,
-// and it reads lines in reverse order starting from that position.
-type QLogFile struct {
-   file     *os.File // the query log file
-   position int64    // current position in the file
+// Please note, that this is a stateful object. Internally, it contains a
+// pointer to a specific position in the file, and it reads lines in reverse
+// order starting from that position.
+type qLogFile struct {
+   // file is the query log file.
+   file *os.File

-   buffer      []byte // buffer that we've read from the file
-   bufferStart int64  // start of the buffer (in the file)
-   bufferLen   int    // buffer len
+   // buffer that we've read from the file.
+   buffer []byte

-   lock sync.Mutex // We use mutex to make it thread-safe
+   // lock is a mutex to make it thread-safe.
+   lock sync.Mutex
+
+   // position is the position in the file.
+   position int64
+
+   // bufferStart is the start of the buffer (in the file).
+   bufferStart int64
+
+   // bufferLen is the length of the buffer.
+   bufferLen int
 }

-// NewQLogFile initializes a new instance of the QLogFile
-func NewQLogFile(path string) (*QLogFile, error) {
+// newQLogFile initializes a new instance of the qLogFile.
+func newQLogFile(path string) (qf *qLogFile, err error) {
    f, err := os.OpenFile(path, os.O_RDONLY, 0o644)
    if err != nil {
        return nil, err
    }

-   return &QLogFile{
-       file: f,
-   }, nil
+   return &qLogFile{file: f}, nil
+}
+
+// validateQLogLineIdx returns error if the line index is not valid to continue
+// search.
+func (q *qLogFile) validateQLogLineIdx(lineIdx, lastProbeLineIdx, ts, fSize int64) (err error) {
+   if lineIdx == lastProbeLineIdx {
+       if lineIdx == 0 {
+           return errTSTooEarly
+       }
+
+       // If we're testing the same line twice then most likely the scope is
+       // too narrow and we won't find anything anymore in any other file.
+       return fmt.Errorf("looking up timestamp %d in %q: %w", ts, q.file.Name(), errTSNotFound)
+   } else if lineIdx == fSize {
+       return errTSTooLate
+   }
+
+   return nil
 }

 // seekTS performs binary search in the query log file looking for a record
-// with the specified timestamp. Once the record is found, it sets
-// "position" so that the next ReadNext call returned that record.
+// with the specified timestamp. Once the record is found, it sets "position"
+// so that the next ReadNext call returned that record.
 //
 // The algorithm is rather simple:
-// 1. It starts with the position in the middle of a file
-// 2. Shifts back to the beginning of the line
-// 3. Checks the log record timestamp
-// 4. If it is lower than the timestamp we are looking for,
-// it shifts seek position to 3/4 of the file. Otherwise, to 1/4 of the file.
-// 5. It performs the search again, every time the search scope is narrowed twice.
+// 1. It starts with the position in the middle of a file.
+// 2. Shifts back to the beginning of the line.
+// 3. Checks the log record timestamp.
+// 4. If it is lower than the timestamp we are looking for, it shifts seek
+// position to 3/4 of the file. Otherwise, to 1/4 of the file.
+// 5. It performs the search again, every time the search scope is narrowed
+// twice.
 //
 // Returns:
-// * It returns the position of the the line with the timestamp we were looking for
-// so that when we call "ReadNext" this line was returned.
-// * Depth of the search (how many times we compared timestamps).
-// * If we could not find it, it returns one of the errors described above.
-func (q *QLogFile) seekTS(timestamp int64) (int64, int, error) {
+// - It returns the position of the line with the timestamp we were looking
+//   for so that when we call "ReadNext" this line was returned.
+// - Depth of the search (how many times we compared timestamps).
+// - If we could not find it, it returns one of the errors described above.
+func (q *qLogFile) seekTS(timestamp int64) (pos int64, depth int, err error) {
    q.lock.Lock()
    defer q.lock.Unlock()

-   // Empty the buffer
+   // Empty the buffer.
    q.buffer = nil

-   // First of all, check the file size
+   // First of all, check the file size.
    fileInfo, err := q.file.Stat()
    if err != nil {
        return 0, 0, err
    }

-   // Define the search scope
-   start := int64(0)          // start of the search interval (position in the file)
-   end := fileInfo.Size()     // end of the search interval (position in the file)
-   probe := (end - start) / 2 // probe -- approximate index of the line we'll try to check
+   // Define the search scope.
+   // Start of the search interval (position in the file).
+   start := int64(0)
+   // End of the search interval (position in the file).
+   end := fileInfo.Size()
+   // Probe is the approximate index of the line we'll try to check.
+   probe := (end - start) / 2

    var line string
-   var lineIdx int64 // index of the probe line in the file
+   // Index of the probe line in the file.
+   var lineIdx int64
    var lineEndIdx int64
-   var lastProbeLineIdx int64 // index of the last probe line
+   // Index of the last probe line.
+   var lastProbeLineIdx int64
    lastProbeLineIdx = -1

-   // Count seek depth in order to detect mistakes
-   // If depth is too large, we should stop the search
-   depth := 0
+   // Count seek depth in order to detect mistakes. If depth is too large,
+   // we should stop the search.

    for {
-       // Get the line at the specified position
+       // Get the line at the specified position.
        line, lineIdx, lineEndIdx, err = q.readProbeLine(probe)
        if err != nil {
            return 0, depth, err
        }

-       if lineIdx == lastProbeLineIdx {
-           if lineIdx == 0 {
-               return 0, depth, ErrTSTooEarly
-           }
-
-           // If we're testing the same line twice then most likely
-           // the scope is too narrow and we won't find anything
-           // anymore in any other file.
-           return 0, depth, fmt.Errorf("looking up timestamp %d in %q: %w", timestamp, q.file.Name(), ErrTSNotFound)
-       } else if lineIdx == fileInfo.Size() {
-           return 0, depth, ErrTSTooLate
+       // Check if the line index if invalid.
+       err = q.validateQLogLineIdx(lineIdx, lastProbeLineIdx, timestamp, fileInfo.Size())
+       if err != nil {
+           return 0, depth, err
        }

-       // Save the last found idx
+       // Save the last found idx.
        lastProbeLineIdx = lineIdx

-       // Get the timestamp from the query log record
+       // Get the timestamp from the query log record.
        ts := readQLogTimestamp(line)
        if ts == 0 {
-           return 0, depth, fmt.Errorf("looking up timestamp %d in %q: record %q has empty timestamp", timestamp, q.file.Name(), line)
+           return 0, depth, fmt.Errorf(
+               "looking up timestamp %d in %q: record %q has empty timestamp",
+               timestamp,
+               q.file.Name(),
+               line,
+           )
        }

        if ts == timestamp {
-           // Hurray, returning the result
+           // Hurray, returning the result.
            break
        }

-       // Narrow the scope and repeat the search
+       // Narrow the scope and repeat the search.
        if ts > timestamp {
-           // If the timestamp we're looking for is OLDER than what we found
-           // Then the line is somewhere on the LEFT side from the current probe position
+           // If the timestamp we're looking for is OLDER than what we found,
+           // then the line is somewhere on the LEFT side from the current
+           // probe position.
            end = lineIdx
        } else {
-           // If the timestamp we're looking for is NEWER than what we found
-           // Then the line is somewhere on the RIGHT side from the current probe position
+           // If the timestamp we're looking for is NEWER than what we found,
+           // then the line is somewhere on the RIGHT side from the current
+           // probe position.
            start = lineEndIdx
        }
        probe = start + (end-start)/2

        depth++
        if depth >= 100 {
-           return 0, depth, fmt.Errorf("looking up timestamp %d in %q: depth %d too high: %w", timestamp, q.file.Name(), depth, ErrTSNotFound)
+           return 0, depth, fmt.Errorf(
+               "looking up timestamp %d in %q: depth %d too high: %w",
+               timestamp,
+               q.file.Name(),
+               depth,
+               errTSNotFound,
+           )
        }
    }

@@ -154,37 +194,39 @@ func (q *QLogFile) seekTS(timestamp int64) (int64, int, error) {
    return q.position, depth, nil
 }

-// SeekStart changes the current position to the end of the file
-// Please note that we're reading query log in the reverse order
-// and that's why log start is actually the end of file
+// SeekStart changes the current position to the end of the file. Please note,
+// that we're reading query log in the reverse order and that's why log start
+// is actually the end of file.
 //
-// Returns nil if we were able to change the current position.
-// Returns error in any other case.
-func (q *QLogFile) SeekStart() (int64, error) {
+// Returns nil if we were able to change the current position. Returns error
+// in any other case.
+func (q *qLogFile) SeekStart() (int64, error) {
    q.lock.Lock()
    defer q.lock.Unlock()

-   // Empty the buffer
+   // Empty the buffer.
    q.buffer = nil

-   // First of all, check the file size
+   // First of all, check the file size.
    fileInfo, err := q.file.Stat()
    if err != nil {
        return 0, err
    }

-   // Place the position to the very end of file
+   // Place the position to the very end of file.
    q.position = fileInfo.Size() - 1
    if q.position < 0 {
        q.position = 0
    }

    return q.position, nil
 }

-// ReadNext reads the next line (in the reverse order) from the file
-// and shifts the current position left to the next (actually prev) line.
-// returns io.EOF if there's nothing to read more
-func (q *QLogFile) ReadNext() (string, error) {
+// ReadNext reads the next line (in the reverse order) from the file and shifts
+// the current position left to the next (actually prev) line.
+//
+// Returns io.EOF if there's nothing more to read.
+func (q *qLogFile) ReadNext() (string, error) {
    q.lock.Lock()
    defer q.lock.Unlock()

@@ -197,35 +239,34 @@ func (q *QLogFile) ReadNext() (string, error) {
        return "", err
    }

-   // Shift position
+   // Shift position.
    if lineIdx == 0 {
        q.position = 0
    } else {
-       // there's usually a line break before the line
-       // so we should shift one more char left from the line
-       // line\nline
+       // There's usually a line break before the line, so we should shift one
+       // more char left from the line "\nline".
        q.position = lineIdx - 1
    }
    return line, err
 }

-// Close frees the underlying resources
-func (q *QLogFile) Close() error {
+// Close frees the underlying resources.
+func (q *qLogFile) Close() error {
    return q.file.Close()
 }

-// readNextLine reads the next line from the specified position
-// this line actually have to END on that position.
+// readNextLine reads the next line from the specified position. This line
+// actually have to END on that position.
 //
-// the algorithm is:
-// 1. check if we have the buffer initialized
-// 2. if it is, scan it and look for the line there
-// 3. if we cannot find the line there, read the prev chunk into the buffer
-// 4. read the line from the buffer
-func (q *QLogFile) readNextLine(position int64) (string, int64, error) {
+// The algorithm is:
+// 1. Check if we have the buffer initialized.
+// 2. If it is so, scan it and look for the line there.
+// 3. If we cannot find the line there, read the prev chunk into the buffer.
+// 4. Read the line from the buffer.
+func (q *qLogFile) readNextLine(position int64) (string, int64, error) {
    relativePos := position - q.bufferStart
    if q.buffer == nil || (relativePos < maxEntrySize && q.bufferStart != 0) {
-       // Time to re-init the buffer
+       // Time to re-init the buffer.
        err := q.initBuffer(position)
        if err != nil {
            return "", 0, err
@@ -233,8 +274,7 @@ func (q *QLogFile) readNextLine(position int64) (string, int64, error) {
        relativePos = position - q.bufferStart
    }

-   // Look for the end of the prev line
-   // This is where we'll read from
+   // Look for the end of the prev line, this is where we'll read from.
    startLine := int64(0)
    for i := relativePos - 1; i >= 0; i-- {
        if q.buffer[i] == '\n' {
@@ -245,18 +285,19 @@ func (q *QLogFile) readNextLine(position int64) (string, int64, error) {

    line := string(q.buffer[startLine:relativePos])
    lineIdx := q.bufferStart + startLine

    return line, lineIdx, nil
 }

-// initBuffer initializes the QLogFile buffer.
-// the goal is to read a chunk of file that includes the line with the specified position.
-func (q *QLogFile) initBuffer(position int64) error {
+// initBuffer initializes the qLogFile buffer. The goal is to read a chunk of
+// file that includes the line with the specified position.
+func (q *qLogFile) initBuffer(position int64) error {
    q.bufferStart = int64(0)
    if position > bufferSize {
        q.bufferStart = position - bufferSize
    }

-   // Seek to this position
+   // Seek to this position.
    _, err := q.file.Seek(q.bufferStart, io.SeekStart)
    if err != nil {
        return err
@@ -271,34 +312,35 @@ func (q *QLogFile) initBuffer(position int64) error {
        return err
    }

-// readProbeLine reads a line that includes the specified position
-// this method is supposed to be used when we use binary search in the Seek method
-// in the case of consecutive reads, use readNext (it uses a better buffer)
-func (q *QLogFile) readProbeLine(position int64) (string, int64, int64, error) {
-   // First of all, we should read a buffer that will include the query log line
-   // In order to do this, we'll define the boundaries
+// readProbeLine reads a line that includes the specified position. This
+// method is supposed to be used when we use binary search in the Seek method.
+// In the case of consecutive reads, use readNext, cause it uses better buffer.
+func (q *qLogFile) readProbeLine(position int64) (string, int64, int64, error) {
+   // First of all, we should read a buffer that will include the query log
+   // line. In order to do this, we'll define the boundaries.
    seekPosition := int64(0)
-   relativePos := position // position relative to the buffer we're going to read
+   // Position relative to the buffer we're going to read.
+   relativePos := position
    if position > maxEntrySize {
        seekPosition = position - maxEntrySize
        relativePos = maxEntrySize
    }

-   // Seek to this position
+   // Seek to this position.
    _, err := q.file.Seek(seekPosition, io.SeekStart)
    if err != nil {
        return "", 0, 0, err
    }

-   // The buffer size is 2*maxEntrySize
+   // The buffer size is 2*maxEntrySize.
    buffer := make([]byte, maxEntrySize*2)
    bufferLen, err := q.file.Read(buffer)
    if err != nil {
        return "", 0, 0, err
    }

-   // Now start looking for the new line character starting
-   // from the relativePos and going left
+   // Now start looking for the new line character starting from the
+   // relativePos and going left.
    startLine := int64(0)
    for i := relativePos - 1; i >= 0; i-- {
        if buffer[i] == '\n' {
@@ -306,7 +348,7 @@ func (q *QLogFile) readProbeLine(position int64) (string, int64, int64, error) {
            break
        }
    }
-   // Looking for the end of line now
+   // Looking for the end of line now.
    endLine := int64(bufferLen)
    lineEndIdx := endLine + seekPosition
    for i := relativePos; i < int64(bufferLen); i++ {
@@ -317,13 +359,13 @@ func (q *QLogFile) readProbeLine(position int64) (string, int64, int64, error) {
        }
    }

-   // Finally we can return the string we were looking for
+   // Finally we can return the string we were looking for.
    lineIdx := startLine + seekPosition
    return string(buffer[startLine:endLine]), lineIdx, lineEndIdx, nil
 }

-// readJSONvalue reads a JSON string in form of '"key":"value"'. prefix must be
-// of the form '"key":"' to generate less garbage.
+// readJSONValue reads a JSON string in form of '"key":"value"'. prefix must
+// be of the form '"key":"' to generate less garbage.
 func readJSONValue(s, prefix string) string {
    i := strings.Index(s, prefix)
    if i == -1 {
@@ -340,7 +382,7 @@ func readJSONValue(s, prefix string) string {
    return s[start:end]
 }

-// readQLogTimestamp reads the timestamp field from the query log line
+// readQLogTimestamp reads the timestamp field from the query log line.
 func readQLogTimestamp(str string) int64 {
    val := readJSONValue(str, `"T":"`)
    if len(val) == 0 {
@@ -351,10 +393,12 @@ func readQLogTimestamp(str string) int64 {
        log.Error("Couldn't find timestamp: %s", str)
        return 0
    }

    tm, err := time.Parse(time.RFC3339Nano, val)
    if err != nil {
        log.Error("Couldn't parse timestamp: %s", val)
        return 0
    }

    return tm.UnixNano()
 }
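Note: seekTS, as documented in the hunk above, binary-searches a timestamp-sorted log file by probing the middle of the current range, shifting back to the start of the probed line, and narrowing the range by comparing timestamps. The following is a simplified, in-memory sketch of that idea over a byte slice; it glosses over the reverse reading, buffering, and the errTS* sentinels that the real qLogFile handles, and the integer-prefixed line format is an assumption made only for this example.

package main

import (
    "bytes"
    "fmt"
)

// seekLine returns the start offset of the line whose leading integer
// timestamp equals ts, assuming data holds newline-separated records sorted
// by that timestamp in ascending order.
func seekLine(data []byte, ts int) (pos int, ok bool) {
    start, end := 0, len(data)
    lastLineStart := -1
    for start < end {
        probe := start + (end-start)/2

        // Shift back to the beginning of the probed line.
        lineStart := bytes.LastIndexByte(data[:probe], '\n') + 1
        lineEnd := bytes.IndexByte(data[lineStart:], '\n')
        if lineEnd < 0 {
            lineEnd = len(data)
        } else {
            lineEnd += lineStart
        }

        if lineStart == lastLineStart {
            // Probing the same line twice: the scope is exhausted.
            return 0, false
        }
        lastLineStart = lineStart

        var got int
        fmt.Sscanf(string(data[lineStart:lineEnd]), "%d", &got)
        switch {
        case got == ts:
            return lineStart, true
        case got > ts:
            // Looking for an older record: go left.
            end = lineStart
        default:
            // Looking for a newer record: go right.
            start = lineEnd + 1
        }
    }

    return 0, false
}

func main() {
    log := []byte("100 a\n200 b\n300 c\n")
    pos, ok := seekLine(log, 200)
    fmt.Println(pos, ok) // 6 true
}

The same "probe, align to line start, compare, halve the range" loop is what keeps lookups in multi-gigabyte query logs close to O(log n) file reads.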
@@ -72,15 +72,15 @@ func prepareTestFiles(t *testing.T, filesNum, linesNum int) []string {
    return files
 }

-// newTestQLogFile creates new *QLogFile for tests and registers the required
+// newTestQLogFile creates new *qLogFile for tests and registers the required
 // cleanup functions.
-func newTestQLogFile(t *testing.T, linesNum int) (file *QLogFile) {
+func newTestQLogFile(t *testing.T, linesNum int) (file *qLogFile) {
    t.Helper()

    testFile := prepareTestFiles(t, 1, linesNum)[0]

-   // Create the new QLogFile instance.
-   file, err := NewQLogFile(testFile)
+   // Create the new qLogFile instance.
+   file, err := newQLogFile(testFile)
    require.NoError(t, err)

    assert.NotNil(t, file)
@@ -240,7 +240,7 @@ func TestQLogFile_SeekTS_bad(t *testing.T) {
    }
 }

-func getQLogFileLine(q *QLogFile, lineNumber int) (line string, err error) {
+func getQLogFileLine(q *qLogFile, lineNumber int) (line string, err error) {
    if _, err = q.SeekStart(); err != nil {
        return line, err
    }
@@ -256,7 +256,7 @@ func getQLogFileLine(q *QLogFile, lineNumber int) (line string, err error) {

 // Check adding and loading (with filtering) entries from disk and memory.
 func TestQLogFile(t *testing.T) {
-   // Create the new QLogFile instance.
+   // Create the new qLogFile instance.
    q := newTestQLogFile(t, 2)

    // Seek to the start.
@@ -285,7 +285,7 @@ func TestQLogFile(t *testing.T) {
    assert.Empty(t, line)
 }

-func NewTestQLogFileData(t *testing.T, data string) (file *QLogFile) {
+func newTestQLogFileData(t *testing.T, data string) (file *qLogFile) {
    f, err := os.CreateTemp(t.TempDir(), "*.txt")
    require.NoError(t, err)
    testutil.CleanupAndRequireSuccess(t, f.Close)
@@ -293,7 +293,7 @@ func NewTestQLogFileData(t *testing.T, data string) (file *QLogFile) {
    _, err = f.WriteString(data)
    require.NoError(t, err)

-   file, err = NewQLogFile(f.Name())
+   file, err = newQLogFile(f.Name())
    require.NoError(t, err)
    testutil.CleanupAndRequireSuccess(t, file.Close)

@@ -309,9 +309,9 @@ func TestQLog_Seek(t *testing.T) {
    timestamp, _ := time.Parse(time.RFC3339Nano, "2020-08-31T18:44:25.376690873+03:00")

    testCases := []struct {
+       wantErr   error
        name      string
        delta     int
-       wantErr   error
        wantDepth int
    }{{
        name: "ok",
@@ -321,12 +321,12 @@ func TestQLog_Seek(t *testing.T) {
    }, {
        name:      "too_late",
        delta:     2,
-       wantErr:   ErrTSTooLate,
+       wantErr:   errTSTooLate,
        wantDepth: 2,
    }, {
        name:      "too_early",
        delta:     -2,
-       wantErr:   ErrTSTooEarly,
+       wantErr:   errTSTooEarly,
        wantDepth: 1,
    }}

@@ -338,7 +338,7 @@ func TestQLog_Seek(t *testing.T) {
        timestamp.Add(time.Second).Format(time.RFC3339Nano),
    )

-   q := NewTestQLogFileData(t, data)
+   q := newTestQLogFileData(t, data)

    _, depth, err := q.seekTS(timestamp.Add(time.Second * time.Duration(tc.delta)).UnixNano())
    require.Truef(t, errors.Is(err, tc.wantErr), "%v", err)
@@ -9,36 +9,36 @@ import (
    "github.com/AdguardTeam/golibs/log"
 )

-// QLogReader allows reading from multiple query log files in the reverse order.
+// qLogReader allows reading from multiple query log files in the reverse
+// order.
 //
-// Please note that this is a stateful object.
-// Internally, it contains a pointer to a particular query log file, and
-// to a specific position in this file, and it reads lines in reverse order
-// starting from that position.
-type QLogReader struct {
-   // qFiles - array with the query log files
-   // The order is - from oldest to newest
-   qFiles []*QLogFile
+// Please note that this is a stateful object. Internally, it contains a
+// pointer to a particular query log file, and to a specific position in this
+// file, and it reads lines in reverse order starting from that position.
+type qLogReader struct {
+   // qFiles is an array with the query log files. The order is from oldest
+   // to newest.
+   qFiles []*qLogFile

-   currentFile int // Index of the current file
+   // currentFile is the index of the current file.
+   currentFile int
 }

-// NewQLogReader initializes a QLogReader instance
-// with the specified files
-func NewQLogReader(files []string) (*QLogReader, error) {
-   qFiles := make([]*QLogFile, 0)
+// newQLogReader initializes a qLogReader instance with the specified files.
+func newQLogReader(files []string) (*qLogReader, error) {
+   qFiles := make([]*qLogFile, 0)

    for _, f := range files {
-       q, err := NewQLogFile(f)
+       q, err := newQLogFile(f)
        if err != nil {
            if errors.Is(err, os.ErrNotExist) {
                continue
            }

            // Close what we've already opened.
-           cerr := closeQFiles(qFiles)
-           if cerr != nil {
-               log.Debug("querylog: closing files: %s", cerr)
+           cErr := closeQFiles(qFiles)
+           if cErr != nil {
+               log.Debug("querylog: closing files: %s", cErr)
            }

            return nil, err
@@ -47,31 +47,28 @@ func NewQLogReader(files []string) (*QLogReader, error) {
        qFiles = append(qFiles, q)
    }

-   return &QLogReader{
-       qFiles:      qFiles,
-       currentFile: (len(qFiles) - 1),
-   }, nil
+   return &qLogReader{qFiles: qFiles, currentFile: len(qFiles) - 1}, nil
 }

 // seekTS performs binary search of a query log record with the specified
-// timestamp. If the record is found, it sets QLogReader's position to point to
-// that line, so that the next ReadNext call returned this line.
-func (r *QLogReader) seekTS(timestamp int64) (err error) {
+// timestamp. If the record is found, it sets qLogReader's position to point
+// to that line, so that the next ReadNext call returned this line.
+func (r *qLogReader) seekTS(timestamp int64) (err error) {
    for i := len(r.qFiles) - 1; i >= 0; i-- {
        q := r.qFiles[i]
        _, _, err = q.seekTS(timestamp)
        if err != nil {
-           if errors.Is(err, ErrTSTooEarly) {
+           if errors.Is(err, errTSTooEarly) {
                // Look at the next file, since we've reached the end of this
                // one. If there is no next file, it's not found.
-               err = ErrTSNotFound
+               err = errTSNotFound

                continue
-           } else if errors.Is(err, ErrTSTooLate) {
+           } else if errors.Is(err, errTSTooLate) {
                // Just seek to the start then. timestamp is probably between
                // the end of the previous one and the start of this one.
                return r.SeekStart()
-           } else if errors.Is(err, ErrTSNotFound) {
+           } else if errors.Is(err, errTSNotFound) {
                return err
            } else {
                return fmt.Errorf("seekts: file at index %d: %w", i, err)
@@ -80,7 +77,7 @@ func (r *QLogReader) seekTS(timestamp int64) (err error) {

        // The search is finished, and the searched element has been found.
        // Update currentFile only, position is already set properly in
-       // QLogFile.
+       // qLogFile.
        r.currentFile = i

        return nil
@@ -93,13 +90,13 @@ func (r *QLogReader) seekTS(timestamp int64) (err error) {
    return nil
 }

-// SeekStart changes the current position to the end of the newest file
-// Please note that we're reading query log in the reverse order
-// and that's why log start is actually the end of file
+// SeekStart changes the current position to the end of the newest file.
+// Please note that we're reading query log in the reverse order and that's why
+// the log starts actually at the end of file.
 //
-// Returns nil if we were able to change the current position.
-// Returns error in any other case.
-func (r *QLogReader) SeekStart() error {
+// Returns nil if we were able to change the current position. Returns error
+// in any other cases.
+func (r *qLogReader) SeekStart() error {
    if len(r.qFiles) == 0 {
        return nil
    }
@@ -110,10 +107,12 @@ func (r *QLogReader) SeekStart() error {
    return err
 }

-// ReadNext reads the next line (in the reverse order) from the query log files.
-// and shifts the current position left to the next (actually prev) line (or the next file).
-// returns io.EOF if there's nothing to read more.
-func (r *QLogReader) ReadNext() (string, error) {
+// ReadNext reads the next line (in the reverse order) from the query log
+// files. Then shifts the current position left to the next (actually prev)
+// line (or the next file).
+//
+// Returns io.EOF if there is nothing more to read.
+func (r *qLogReader) ReadNext() (string, error) {
    if len(r.qFiles) == 0 {
        return "", io.EOF
    }
@@ -122,7 +121,7 @@ func (r *QLogReader) ReadNext() (string, error) {
    q := r.qFiles[r.currentFile]
    line, err := q.ReadNext()
    if err != nil {
-       // Shift to the older file
+       // Shift to the older file.
        r.currentFile--
        if r.currentFile < 0 {
            break
@@ -130,10 +129,10 @@ func (r *QLogReader) ReadNext() (string, error) {

        q = r.qFiles[r.currentFile]

-       // Set it's position to the start right away
+       // Set its position to the start right away.
        _, err = q.SeekStart()

-       // This is unexpected, return an error right away
+       // This is unexpected, return an error right away.
        if err != nil {
            return "", err
        }
@@ -142,17 +141,17 @@ func (r *QLogReader) ReadNext() (string, error) {
        }
    }

-   // Nothing to read anymore
+   // Nothing to read anymore.
    return "", io.EOF
 }

-// Close closes the QLogReader
-func (r *QLogReader) Close() error {
+// Close closes the qLogReader.
+func (r *qLogReader) Close() error {
    return closeQFiles(r.qFiles)
 }

-// closeQFiles - helper method to close multiple QLogFile instances
-func closeQFiles(qFiles []*QLogFile) error {
+// closeQFiles is a helper method to close multiple qLogFile instances.
+func closeQFiles(qFiles []*qLogFile) error {
    var errs []error

    for _, q := range qFiles {
@@ -163,7 +162,7 @@ func closeQFiles(qFiles []*QLogFile) error {
    }

    if len(errs) > 0 {
-       return errors.List("error while closing QLogReader", errs...)
+       return errors.List("error while closing qLogReader", errs...)
    }

    return nil
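Note: qLogReader, per the doc comments above, walks the rotated log files from newest to oldest and hands out lines in reverse order, shifting to the older file once the current one is exhausted. A hedged, in-memory toy model of that traversal (the type and its fields are assumptions for this sketch only):

package main

import (
    "fmt"
    "io"
)

// multiReverseReader iterates lines from the newest file to the oldest, and
// within each file from the last line to the first.
type multiReverseReader struct {
    files   [][]string // oldest to newest, as in the rotation scheme
    curFile int
    curLine int
}

func newMultiReverseReader(files [][]string) *multiReverseReader {
    r := &multiReverseReader{files: files, curFile: len(files) - 1}
    if r.curFile >= 0 {
        r.curLine = len(files[r.curFile]) - 1
    }

    return r
}

// ReadNext returns the next line in reverse order, or io.EOF when there is
// nothing more to read.
func (r *multiReverseReader) ReadNext() (string, error) {
    for r.curFile >= 0 {
        if r.curLine >= 0 {
            line := r.files[r.curFile][r.curLine]
            r.curLine--

            return line, nil
        }

        // Shift to the older file and start from its end.
        r.curFile--
        if r.curFile >= 0 {
            r.curLine = len(r.files[r.curFile]) - 1
        }
    }

    return "", io.EOF
}

func main() {
    r := newMultiReverseReader([][]string{
        {"old-1", "old-2"},
        {"new-1", "new-2"},
    })
    for {
        line, err := r.ReadNext()
        if err == io.EOF {
            break
        }
        fmt.Println(line)
    }
    // Prints: new-2, new-1, old-2, old-1.
}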
@@ -10,15 +10,15 @@ import (
    "github.com/stretchr/testify/require"
 )

-// newTestQLogReader creates new *QLogReader for tests and registers the
+// newTestQLogReader creates new *qLogReader for tests and registers the
 // required cleanup functions.
-func newTestQLogReader(t *testing.T, filesNum, linesNum int) (reader *QLogReader) {
+func newTestQLogReader(t *testing.T, filesNum, linesNum int) (reader *qLogReader) {
    t.Helper()

    testFiles := prepareTestFiles(t, filesNum, linesNum)

-   // Create the new QLogReader instance.
-   reader, err := NewQLogReader(testFiles)
+   // Create the new qLogReader instance.
+   reader, err := newQLogReader(testFiles)
    require.NoError(t, err)

    assert.NotNil(t, reader)
@@ -75,9 +75,9 @@ func TestQLogReader_Seek(t *testing.T) {
    r := newTestQLogReader(t, 2, 10000)

    testCases := []struct {
+       want error
        name string
        time string
-       want error
    }{{
        name: "not_too_old",
        time: "2020-02-18T22:39:35.920973+03:00",
@@ -97,7 +97,7 @@ func TestQLogReader_Seek(t *testing.T) {
    }, {
        name: "non-existent_long_ago",
        time: "2000-02-19T01:23:16.920973+03:00",
-       want: ErrTSNotFound,
+       want: errTSNotFound,
    }, {
        name: "non-existent_far_ahead",
        time: "2100-02-19T01:23:16.920973+03:00",
@@ -105,7 +105,7 @@ func TestQLogReader_Seek(t *testing.T) {
    }, {
        name: "non-existent_but_could",
        time: "2020-02-18T22:36:37.000000+03:00",
-       want: ErrTSNotFound,
+       want: errTSNotFound,
    }}

    for _, tc := range testCases {
@@ -125,9 +125,9 @@ func TestQLogReader_ReadNext(t *testing.T) {
    r := newTestQLogReader(t, filesNum, linesNum)

    testCases := []struct {
+       want  error
        name  string
        start int
-       want  error
    }{{
        name:  "ok",
        start: 0,
@@ -1,9 +1,11 @@
 package querylog

 import (
+   "fmt"
    "io"
    "time"

+   "github.com/AdguardTeam/golibs/errors"
    "github.com/AdguardTeam/golibs/log"
    "golang.org/x/exp/slices"
 )
@@ -134,84 +136,112 @@ func (l *queryLog) search(params *searchParams) (entries []*logEntry, oldest tim
    return entries, oldest
 }

-// searchFiles looks up log records from all log files. It optionally uses the
-// client cache, if provided. searchFiles does not scan more than
-// maxFileScanEntries so callers may need to call it several times to get all
-// results. oldest and total are the time of the oldest processed entry and the
-// total number of processed entries, including discarded ones, correspondingly.
-func (l *queryLog) searchFiles(
-   params *searchParams,
-   cache clientCache,
-) (entries []*logEntry, oldest time.Time, total int) {
+// seekRecord changes the current position to the next record older than the
+// provided parameter.
+func (r *qLogReader) seekRecord(olderThan time.Time) (err error) {
+   if olderThan.IsZero() {
+       return r.SeekStart()
+   }
+
+   err = r.seekTS(olderThan.UnixNano())
+   if err == nil {
+       // Read to the next record, because we only need the one that goes
+       // after it.
+       _, err = r.ReadNext()
+   }
+
+   return err
+}
+
+// setQLogReader creates a reader with the specified files and sets the
+// position to the next record older than the provided parameter.
+func (l *queryLog) setQLogReader(olderThan time.Time) (qr *qLogReader, err error) {
    files := []string{
        l.logFile + ".1",
        l.logFile,
    }

-   r, err := NewQLogReader(files)
+   r, err := newQLogReader(files)
    if err != nil {
-       log.Error("querylog: opening qlog reader: %s", err)
-
-       return entries, oldest, 0
-   }
-
-   defer func() {
-       closeErr := r.Close()
-       if closeErr != nil {
-           log.Error("querylog: closing file: %s", err)
-       }
-   }()
-
-   if params.olderThan.IsZero() {
-       err = r.SeekStart()
-   } else {
-       err = r.seekTS(params.olderThan.UnixNano())
-       if err == nil {
-           // Read to the next record, because we only need the one that goes
-           // after it.
-           _, err = r.ReadNext()
-       }
+       return nil, fmt.Errorf("opening qlog reader: %s", err)
    }

+   err = r.seekRecord(olderThan)
    if err != nil {
-       log.Debug("querylog: cannot seek to %s: %s", params.olderThan, err)
+       defer func() { err = errors.WithDeferred(err, r.Close()) }()
+       log.Debug("querylog: cannot seek to %s: %s", olderThan, err)

-       return entries, oldest, 0
+       return nil, nil
    }

-   totalLimit := params.offset + params.limit
-   oldestNano := int64(0)
+   return r, nil
+}

-   // By default, we do not scan more than maxFileScanEntries at once. The
-   // idea is to make search calls faster so that the UI could handle it and
-   // show something quicker. This behavior can be overridden if
-   // maxFileScanEntries is set to 0.
+// readEntries reads entries from the reader to totalLimit. By default, we do
+// not scan more than maxFileScanEntries at once. The idea is to make search
+// calls faster so that the UI could handle it and show something quicker.
+// This behavior can be overridden if maxFileScanEntries is set to 0.
+func (l *queryLog) readEntries(
+   r *qLogReader,
+   params *searchParams,
+   cache clientCache,
+   totalLimit int,
+) (entries []*logEntry, oldestNano int64, total int) {
    for total < params.maxFileScanEntries || params.maxFileScanEntries <= 0 {
-       var e *logEntry
-       var ts int64
-
-       e, ts, err = l.readNextEntry(r, params, cache)
-       if err != nil {
-           if err == io.EOF {
+       ent, ts, rErr := l.readNextEntry(r, params, cache)
+       if rErr != nil {
+           if rErr == io.EOF {
                oldestNano = 0

                break
            }

-           log.Error("querylog: reading next entry: %s", err)
+           log.Error("querylog: reading next entry: %s", rErr)
        }

        oldestNano = ts
        total++

-       if e != nil {
-           entries = append(entries, e)
-           if len(entries) == totalLimit {
-               break
-           }
+       if ent == nil {
+           continue
        }
+
+       entries = append(entries, ent)
+       if len(entries) == totalLimit {
+           break
+       }
    }

+   return entries, oldestNano, total
+}
+
+// searchFiles looks up log records from all log files. It optionally uses the
+// client cache, if provided. searchFiles does not scan more than
+// maxFileScanEntries so callers may need to call it several times to get all
+// the results. oldest and total are the time of the oldest processed entry
+// and the total number of processed entries, including discarded ones,
+// correspondingly.
+func (l *queryLog) searchFiles(
+   params *searchParams,
+   cache clientCache,
+) (entries []*logEntry, oldest time.Time, total int) {
+   r, err := l.setQLogReader(params.olderThan)
+   if err != nil {
+       log.Error("querylog: %s", err)
+   }
+
+   if r == nil {
+       return entries, oldest, 0
+   }
+
+   defer func() {
+       if closeErr := r.Close(); closeErr != nil {
+           log.Error("querylog: closing file: %s", closeErr)
+       }
+   }()
+
+   totalLimit := params.offset + params.limit
+   entries, oldestNano, total := l.readEntries(r, params, cache, totalLimit)
    if oldestNano != 0 {
        oldest = time.Unix(0, oldestNano)
    }
@@ -243,11 +273,11 @@ func (f quickMatchClientFinder) findClient(clientID, ip string) (c *Client) {
 }

 // readNextEntry reads the next log entry and checks if it matches the search
-// criteria. It optionally uses the client cache, if provided. e is nil if the
-// entry doesn't match the search criteria. ts is the timestamp of the
+// criteria. It optionally uses the client cache, if provided. e is nil if
+// the entry doesn't match the search criteria. ts is the timestamp of the
 // processed entry.
 func (l *queryLog) readNextEntry(
-   r *QLogReader,
+   r *qLogReader,
    params *searchParams,
    cache clientCache,
 ) (e *logEntry, ts int64, err error) {
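Note: readEntries, as introduced above, scans candidates up to maxFileScanEntries while collecting at most offset+limit matches, and reports the total number of candidates examined so the caller can continue from where it stopped. A hedged, generic sketch of that pagination shape; scanPage and its parameters are illustrative names, not the project's API:

package main

import "fmt"

// scanPage collects matching items until offset+limit matches are gathered or
// maxScan candidates have been examined (maxScan <= 0 means no cap). total is
// the number of candidates examined, including non-matching ones.
func scanPage[T any](src []T, match func(T) bool, offset, limit, maxScan int) (page []T, total int) {
    totalLimit := offset + limit
    var matches []T
    for _, item := range src {
        if maxScan > 0 && total >= maxScan {
            break
        }
        total++

        if !match(item) {
            continue
        }

        matches = append(matches, item)
        if len(matches) == totalLimit {
            break
        }
    }

    if offset >= len(matches) {
        return nil, total
    }

    return matches[offset:], total
}

func main() {
    src := []int{1, 2, 3, 4, 5, 6, 7, 8}
    even := func(n int) bool { return n%2 == 0 }
    page, total := scanPage(src, even, 1, 2, 0)
    fmt.Println(page, total) // [4 6] 6
}

Capping the scan per call keeps a single search request cheap; the UI can issue follow-up requests with the returned "oldest" position to page further back.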
@@ -2,18 +2,25 @@ package querylog

 import "time"

-// searchParams represent the search query sent by the client
+// searchParams represent the search query sent by the client.
 type searchParams struct {
-   // searchCriteria - list of search criteria that we use to get filter results
-   searchCriteria []searchCriterion
-
-   // olderThen - return entries that are older than this value
-   // if not set - disregard it and return any value
+   // olderThen represents a parameter for entries that are older than this
+   // parameter value. If not set, disregard it and return any value.
    olderThan time.Time

-   offset             int // offset for the search
-   limit              int // limit the number of records returned
-   maxFileScanEntries int // maximum log entries to scan in query log files. if 0 - no limit
+   // searchCriteria is a list of search criteria that we use to get filter
+   // results.
+   searchCriteria []searchCriterion
+
+   // offset for the search.
+   offset int
+
+   // limit the number of records returned.
+   limit int
+
+   // maxFileScanEntries is a maximum of log entries to scan in query log
+   // files. If not set, then no limit.
+   maxFileScanEntries int
 }

 // newSearchParams - creates an empty instance of searchParams
@@ -8,16 +8,16 @@ require (
    github.com/gordonklaus/ineffassign v0.0.0-20230107090616-13ace0543b28
    github.com/kisielk/errcheck v1.6.3
    github.com/kyoh86/looppointer v0.2.1
-   github.com/securego/gosec/v2 v2.15.0
-   golang.org/x/tools v0.8.0
-   golang.org/x/vuln v0.0.0-20230418010118-28ba02ac73db
+   github.com/securego/gosec/v2 v2.16.0
+   golang.org/x/tools v0.9.3
+   golang.org/x/vuln v0.1.0
    honnef.co/go/tools v0.4.3
    mvdan.cc/gofumpt v0.5.0
    mvdan.cc/unparam v0.0.0-20230312165513-e84e2d14e3b8
 )

 require (
-   github.com/BurntSushi/toml v1.2.1 // indirect
+   github.com/BurntSushi/toml v1.3.1 // indirect
    github.com/google/go-cmp v0.5.9 // indirect
    github.com/google/uuid v1.3.0 // indirect
    github.com/gookit/color v1.5.3 // indirect
@@ -25,9 +25,9 @@ require (
    github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect
    github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
    golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect
-   golang.org/x/exp/typeparams v0.0.0-20230321023759-10a507213a29 // indirect
+   golang.org/x/exp/typeparams v0.0.0-20230522175609-2e198f4a06a1 // indirect
    golang.org/x/mod v0.10.0 // indirect
-   golang.org/x/sync v0.1.0 // indirect
-   golang.org/x/sys v0.7.0 // indirect
+   golang.org/x/sync v0.2.0 // indirect
+   golang.org/x/sys v0.8.0 // indirect
    gopkg.in/yaml.v3 v3.0.1 // indirect
 )
@@ -1,17 +1,19 @@
-github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
-github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.3.1 h1:rHnDkSK+/g6DlREUK73PkmIs60pqrnuduK+JmP++JmU=
+github.com/BurntSushi/toml v1.3.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
 github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo=
 github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
+github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
 github.com/golangci/misspell v0.4.0 h1:KtVB/hTK4bbL/S6bs64rYyk8adjmh1BygbBiaAiX+a0=
 github.com/golangci/misspell v0.4.0/go.mod h1:W6O/bwV6lGDxUCChm2ykw9NQdd5bYd1Xkjo88UcWyJc=
 github.com/google/go-cmdtest v0.4.1-0.20220921163831-55ab3332a786 h1:rcv+Ippz6RAtvaGgKxc+8FQIpxHgsF+HBzPyYL2cyVU=
 github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE=
 github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=
 github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
 github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -29,13 +31,13 @@ github.com/kyoh86/nolint v0.0.1 h1:GjNxDEkVn2wAxKHtP7iNTrRxytRZ1wXxLV5j4XzGfRU=
 github.com/kyoh86/nolint v0.0.1/go.mod h1:1ZiZZ7qqrZ9dZegU96phwVcdQOMKIqRzFJL3ewq9gtI=
 github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA=
 github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8=
-github.com/onsi/ginkgo/v2 v2.8.0 h1:pAM+oBNPrpXRs+E/8spkeGx9QgekbRVyr74EUvRVOUI=
-github.com/onsi/gomega v1.26.0 h1:03cDLK28U6hWvCAns6NeydX3zIm4SF3ci69ulidS32Q=
+github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE=
+github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
-github.com/securego/gosec/v2 v2.15.0 h1:v4Ym7FF58/jlykYmmhZ7mTm7FQvN/setNm++0fgIAtw=
-github.com/securego/gosec/v2 v2.15.0/go.mod h1:VOjTrZOkUtSDt2QLSJmQBMWnvwiQPEjg0l+5juIqGk8=
+github.com/securego/gosec/v2 v2.16.0 h1:Pi0JKoasQQ3NnoRao/ww/N/XdynIB9NRYYZT5CyOs5U=
+github.com/securego/gosec/v2 v2.16.0/go.mod h1:xvLcVZqUfo4aAQu56TNv7/Ltz6emAOQAEsrZrt7uGlI=
 github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
 github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
@@ -49,8 +51,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
 golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug=
 golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
-golang.org/x/exp/typeparams v0.0.0-20230321023759-10a507213a29 h1:e7LhZmJ631l59keHP9ssC3sgSn3/oiEHKHKXDkimURY=
-golang.org/x/exp/typeparams v0.0.0-20230321023759-10a507213a29/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
+golang.org/x/exp/typeparams v0.0.0-20230522175609-2e198f4a06a1 h1:pnP8r+W8Fm7XJ8CWtXi4S9oJmPBTrkfYN/dNbaPj6Y4=
+golang.org/x/exp/typeparams v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
@@ -62,12 +64,12 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
 golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
+golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
+golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -77,23 +79,23 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
-golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
+golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20201007032633-0806396f153e/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
 golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
-golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
-golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
-golang.org/x/vuln v0.0.0-20230418010118-28ba02ac73db h1:tLxfII6jPR3mfwEMkyOakawu+Lldo9hIA7vliXnDZYg=
-golang.org/x/vuln v0.0.0-20230418010118-28ba02ac73db/go.mod h1:64LpnL2PuSMzFYeCmJjYiRbroOUG9aCZYznINnF5PHE=
+golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM=
+golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
+golang.org/x/vuln v0.1.0 h1:9GRdj6wAIkDrsMevuolY+SXERPjQPp2P1ysYA0jpZe0=
+golang.org/x/vuln v0.1.0/go.mod h1:/YuzZYjGbwB8y19CisAppfyw3uTZnuCz3r+qgx/QRzU=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -4,6 +4,28 @@
 
 ## v0.108.0: API changes
 
+## v0.107.30: API changes
+
+### `POST /control/version.json` and `GET /control/dhcp/interfaces` content type
+
+* The value of the `Content-Type` header in the `POST /control/version.json` and
+  `GET /control/dhcp/interfaces` HTTP APIs is now correctly set to
+  `application/json` as opposed to `text/plain`.
+
+### New HTTP API 'PUT /control/rewrite/update'
+
+* The new `PUT /control/rewrite/update` HTTP API allows rewrite rule updates.
+  It accepts a JSON object with the following format:
+
+  ```json
+  {
+    "target": {"domain":"example.com","answer":"answer-to-update"},
+    "update": {"domain":"example.com","answer":"new-answer"}
+  }
+  ```
+
+
+
 ## v0.107.29: API changes
 
 ### `GET /control/clients` And `GET /control/clients/find`
@@ -16,6 +38,8 @@
   set AdGuard Home will use default value (false). It can be changed in the
   future versions.
 
+
+
 ## v0.107.27: API changes
 
 ### The new optional fields `"edns_cs_use_custom"` and `"edns_cs_custom_ip"` in `DNSConfig`
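
For reference, a hedged example of calling the new rewrite-update API from Go. The endpoint and JSON shape come from the changelog entry above; the address, port, and credentials are placeholders that depend on the installation.

```go
package main

import (
    "bytes"
    "fmt"
    "log"
    "net/http"
)

func main() {
    // The payload mirrors the format documented above.
    body := []byte(`{
        "target": {"domain": "example.com", "answer": "answer-to-update"},
        "update": {"domain": "example.com", "answer": "new-answer"}
    }`)

    // Assumed local AdGuard Home instance; adjust the address and credentials.
    req, err := http.NewRequest(
        http.MethodPut,
        "http://127.0.0.1:3000/control/rewrite/update",
        bytes.NewReader(body),
    )
    if err != nil {
        log.Fatal(err)
    }

    req.Header.Set("Content-Type", "application/json")
    req.SetBasicAuth("admin", "password")

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()

    // A 200 response means the existing rule was replaced.
    fmt.Println(resp.Status)
}
```
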
@@ -35,7 +35,7 @@ set -f -u
 go_version="$( "${GO:-go}" version )"
 readonly go_version
 
-go_min_version='go1.19.8'
+go_min_version='go1.19.10'
 go_version_msg="
 warning: your go version (${go_version}) is different from the recommended minimal one (${go_min_version}).
 if you have the version installed, please set the GO environment variable.
@@ -159,30 +159,7 @@ run_linter "$GO" vet ./...
 
 run_linter govulncheck ./...
 
-# Apply more lax standards to the code we haven't properly refactored yet.
-run_linter gocyclo --over 13\
-    ./internal/dhcpd\
-    ./internal/home/\
-    ./internal/querylog/\
-    ;
-
-# Apply the normal standards to new or somewhat refactored code.
-run_linter gocyclo --over 10\
-    ./internal/aghio/\
-    ./internal/aghnet/\
-    ./internal/aghos/\
-    ./internal/aghtest/\
-    ./internal/dnsforward/\
-    ./internal/filtering/\
-    ./internal/stats/\
-    ./internal/tools/\
-    ./internal/updater/\
-    ./internal/version/\
-    ./scripts/blocked-services/\
-    ./scripts/vetted-filters/\
-    ./scripts/translations/\
-    ./main.go\
-    ;
+run_linter gocyclo --over 10 .
 
 run_linter ineffassign ./...