diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 55722840..cad8b10a 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -1,7 +1,7 @@
'name': 'build'
'env':
- 'GO_VERSION': '1.19.8'
+ 'GO_VERSION': '1.19.10'
'NODE_VERSION': '14'
'on':
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 8540e4c9..2d46bf97 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -1,7 +1,7 @@
'name': 'lint'
'env':
- 'GO_VERSION': '1.19.8'
+ 'GO_VERSION': '1.19.10'
'on':
'push':
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 362bee91..ef6eb701 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -14,21 +14,57 @@ and this project adheres to
+### Added
+
+- The ability to edit rewrite rules via `PUT /control/rewrite/update` HTTP API
+ ([#1577]).
+
+[#1577]: https://github.com/AdguardTeam/AdGuardHome/issues/1577
+
+## [v0.107.30] - 2023-06-07
+
+See also the [v0.107.30 GitHub milestone][ms-v0.107.30].
+
+### Security
+
+- Go version has been updated to prevent the possibility of exploiting the
+ CVE-2023-29402, CVE-2023-29403, and CVE-2023-29404 Go vulnerabilities fixed in
+ [Go 1.19.10][go-1.19.10].
+
+### Fixed
+
+- Unquoted IPv6 bind hosts with trailing colons, which were erroneously
+ considered unspecified addresses, are now properly validated ([#5752]).
+
+ **NOTE:** the Docker healthcheck script now also doesn't interpret the `""`
+ value as an unspecified address.
+- Incorrect `Content-Type` header value in `POST /control/version.json` and `GET
+ /control/dhcp/interfaces` HTTP APIs ([#5716]).
+- Provided bootstrap servers are now used to resolve the hostnames of plain
+ UDP/TCP upstream servers.
+
+[#5716]: https://github.com/AdguardTeam/AdGuardHome/issues/5716
+
+[go-1.19.10]: https://groups.google.com/g/golang-announce/c/q5135a9d924/m/j0ZoAJOHAwAJ
+[ms-v0.107.30]: https://github.com/AdguardTeam/AdGuardHome/milestone/66?closed=1
+
+
+
## [v0.107.29] - 2023-04-18
See also the [v0.107.29 GitHub milestone][ms-v0.107.29].
@@ -55,6 +91,7 @@ See also the [v0.107.29 GitHub milestone][ms-v0.107.29].
[#5712]: https://github.com/AdguardTeam/AdGuardHome/issues/5712
[#5721]: https://github.com/AdguardTeam/AdGuardHome/issues/5721
[#5725]: https://github.com/AdguardTeam/AdGuardHome/issues/5725
+[#5752]: https://github.com/AdguardTeam/AdGuardHome/issues/5752
[ms-v0.107.29]: https://github.com/AdguardTeam/AdGuardHome/milestone/65?closed=1
@@ -1949,11 +1986,12 @@ See also the [v0.104.2 GitHub milestone][ms-v0.104.2].
-[Unreleased]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.29...HEAD
+[Unreleased]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.30...HEAD
+[v0.107.30]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.29...v0.107.30
[v0.107.29]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.28...v0.107.29
[v0.107.28]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.27...v0.107.28
[v0.107.27]: https://github.com/AdguardTeam/AdGuardHome/compare/v0.107.26...v0.107.27
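The `PUT /control/rewrite/update` entry added to the changelog above implies a request that names both the existing rule and its replacement. A minimal Go sketch of such a call follows; the `target`/`update` body layout, the field names, the listen address, and the credentials are assumptions made for illustration and should be checked against the project's OpenAPI description.

```go
// Hypothetical client for the new rewrite-update endpoint.  The body layout
// ("target" holding the existing rule, "update" holding its replacement) is
// an assumption; consult the OpenAPI spec shipped with AdGuard Home.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

type rewrite struct {
	Domain string `json:"domain"`
	Answer string `json:"answer"`
}

func main() {
	body, err := json.Marshal(map[string]rewrite{
		"target": {Domain: "example.org", Answer: "192.168.1.10"},
		"update": {Domain: "example.org", Answer: "192.168.1.20"},
	})
	if err != nil {
		panic(err)
	}

	req, err := http.NewRequest(http.MethodPut,
		"http://127.0.0.1:3000/control/rewrite/update", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}

	req.Header.Set("Content-Type", "application/json")
	req.SetBasicAuth("admin", "password") // Placeholder credentials.

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	fmt.Println(resp.Status)
}
```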
diff --git a/README.md b/README.md
index 4c011b78..623ef2bf 100644
--- a/README.md
+++ b/README.md
@@ -466,6 +466,10 @@ bug or implementing the feature.
Home](https://github.com/ebrianne/adguard-exporter) by
[@ebrianne](https://github.com/ebrianne).
+ * [Terminal-based, real-time traffic monitoring and statistics for your AdGuard Home
+ instance](https://github.com/Lissy93/AdGuardian-Term) by
+ [@Lissy93](https://github.com/Lissy93).
+
* [AdGuard Home on GLInet
routers](https://forum.gl-inet.com/t/adguardhome-on-gl-routers/10664) by
[Gl-Inet](https://gl-inet.com/).
diff --git a/bamboo-specs/release.yaml b/bamboo-specs/release.yaml
index 56fed9b1..20dbe9b5 100644
--- a/bamboo-specs/release.yaml
+++ b/bamboo-specs/release.yaml
@@ -7,7 +7,7 @@
# Make sure to sync any changes with the branch overrides below.
'variables':
'channel': 'edge'
- 'dockerGo': 'adguard/golang-ubuntu:6.3'
+ 'dockerGo': 'adguard/golang-ubuntu:6.7'
'stages':
- 'Build frontend':
@@ -232,25 +232,24 @@
case "$channel"
in
('release')
- snapchannel='candidate'
- ;;
+ snapchannel='candidate'
+ ;;
('beta')
- snapchannel='beta'
- ;;
+ snapchannel='beta'
+ ;;
('edge')
- snapchannel='edge'
- ;;
+ snapchannel='edge'
+ ;;
(*)
- echo "invalid channel '$channel'"
- exit 1
- ;;
+ echo "invalid channel '$channel'"
+ exit 1
+ ;;
esac
env\
SNAPCRAFT_CHANNEL="$snapchannel"\
SNAPCRAFT_EMAIL="${bamboo.snapcraftEmail}"\
- SNAPCRAFT_MACAROON="${bamboo.snapcraftMacaroonPassword}"\
- SNAPCRAFT_UBUNTU_DISCHARGE="${bamboo.snapcraftUbuntuDischargePassword}"\
+ SNAPCRAFT_STORE_CREDENTIALS="${bamboo.snapcraftMacaroonPassword}"\
../bamboo-deploy-publisher/deploy.sh adguard-home-snap
'final-tasks':
- 'clean'
@@ -280,9 +279,9 @@
if [ "$channel" != 'release' ] && [ "${channel}" != 'beta' ]
then
- echo "don't publish to GitHub Releases for this channel"
+ echo "don't publish to GitHub Releases for this channel"
- exit 0
+ exit 0
fi
cd ./dist/
@@ -331,7 +330,7 @@
# need to build a few of these.
'variables':
'channel': 'beta'
- 'dockerGo': 'adguard/golang-ubuntu:6.3'
+ 'dockerGo': 'adguard/golang-ubuntu:6.7'
# release-vX.Y.Z branches are the branches from which the actual final release
# is built.
- '^release-v[0-9]+\.[0-9]+\.[0-9]+':
@@ -346,4 +345,4 @@
# are the ones that actually get released.
'variables':
'channel': 'release'
- 'dockerGo': 'adguard/golang-ubuntu:6.3'
+ 'dockerGo': 'adguard/golang-ubuntu:6.7'
diff --git a/bamboo-specs/test.yaml b/bamboo-specs/test.yaml
index 4a54c980..abfa4566 100644
--- a/bamboo-specs/test.yaml
+++ b/bamboo-specs/test.yaml
@@ -5,7 +5,7 @@
'key': 'AHBRTSPECS'
'name': 'AdGuard Home - Build and run tests'
'variables':
- 'dockerGo': 'adguard/golang-ubuntu:6.3'
+ 'dockerGo': 'adguard/golang-ubuntu:6.7'
'stages':
- 'Tests':
diff --git a/client/src/__locales/be.json b/client/src/__locales/be.json
index 0ad7119a..1affd298 100644
--- a/client/src/__locales/be.json
+++ b/client/src/__locales/be.json
@@ -150,7 +150,7 @@
"dns_allowlists": "Белыя спісы DNS",
"dns_blocklists_desc": "AdGuard Home будзе блакаваць дамены з чорных спісаў.",
"dns_allowlists_desc": "Дамены з белых спісаў DNS будуць дазволены, нават калі яны знаходзяцца ў любым з чорных спісаў.",
- "custom_filtering_rules": "Карыстацкія правілы фільтрацыі",
+ "custom_filtering_rules": "Карыстальніцкія правілы фільтрацыі",
"encryption_settings": "Налады шыфравання",
"dhcp_settings": "Налады DHCP",
"upstream_dns": "Upstream DNS-серверы",
@@ -247,7 +247,7 @@
"loading_table_status": "Загрузка...",
"page_table_footer_text": "Старонка",
"rows_table_footer_text": "радкоў",
- "updated_custom_filtering_toast": "Занесены змены ў карыстацкія правілы",
+ "updated_custom_filtering_toast": "Карыстальніцкія правілы паспяхова захаваны",
"rule_removed_from_custom_filtering_toast": "Карыстацкае правіла выдалена: {{rule}}",
"rule_added_to_custom_filtering_toast": "Карыстацкае правіла дададзена: {{rule}}",
"query_log_response_status": "Статус: {{value}}",
@@ -568,7 +568,7 @@
"check_desc": "Праверыць фільтрацыю імя хаста",
"check": "Праверыць",
"form_enter_host": "Увядзіце імя хаста",
- "filtered_custom_rules": "Адфільтраваны з дапамогай карыстацкіх правілаў фільтрацыі",
+ "filtered_custom_rules": "Адфільтраваны з дапамогай карыстальніцкіх правіл фільтрацыі",
"choose_from_list": "Абраць са спіса",
"add_custom_list": "Дадаць свой спіс",
"host_whitelisted": "Хост занесены ў белы спіс",
diff --git a/client/src/__locales/fa.json b/client/src/__locales/fa.json
index 0f499866..6b3adb28 100644
--- a/client/src/__locales/fa.json
+++ b/client/src/__locales/fa.json
@@ -268,6 +268,8 @@
"blocking_mode_nxdomain": "NXDOMAIN: پاسخ با کُد NXDOMAIN",
"blocking_mode_null_ip": "Null IP: پاسخ با آدرس آی پی صفر(0.0.0.0 برای A; :: برای AAAA)",
"blocking_mode_custom_ip": "آی پی دستی: پاسخ با آدرس آی پی دستی تنظیم شده",
+ "theme_light": "پوسته روشن",
+ "theme_dark": "پوسته تیره",
"upstream_dns_client_desc": "اگر این فیلد را خالی نگه دارید، AdGuard Home از سرور پیکربندی شده در <0> تنظیماتDNS 0> استفاده می کند.",
"tracker_source": "منبع ردیاب",
"source_label": "منبع",
@@ -567,5 +569,6 @@
"use_saved_key": "از کلید ذخیره شده قبلی استفاده کنید",
"parental_control": "نظارت والدین",
"safe_browsing": "وب گردی اَمن",
- "form_error_password_length": "رمزعبور باید حداقل {{value}} کاراکتر باشد."
+ "form_error_password_length": "رمزعبور باید حداقل {{value}} کاراکتر باشد.",
+ "protection_section_label": "حفاظت"
}
diff --git a/client/src/__locales/fi.json b/client/src/__locales/fi.json
index 89cc2444..1a4c04d5 100644
--- a/client/src/__locales/fi.json
+++ b/client/src/__locales/fi.json
@@ -86,7 +86,7 @@
"request_details": "Pyynnön tiedot",
"client_details": "Päätelaitteen tiedot",
"details": "Yksityiskohdat",
- "back": "Takaisin",
+ "back": "Palaa takaisin",
"dashboard": "Tila",
"settings": "Asetukset",
"filters": "Suodattimet",
@@ -146,8 +146,8 @@
"no_servers_specified": "Palvelimia ei ole määritetty",
"general_settings": "Yleiset asetukset",
"dns_settings": "DNS-asetukset",
- "dns_blocklists": "DNS-estolistat",
- "dns_allowlists": "DNS-sallittujen listat",
+ "dns_blocklists": "DNS-estot",
+ "dns_allowlists": "DNS-sallinnat",
"dns_blocklists_desc": "AdGuard Home estää estolistalla olevat verkkotunnukset.",
"dns_allowlists_desc": "DNS-sallittujen listalla olevat verkkotunnukset sallitaan myös silloin, jos ne ovat jollain muulla estolistalla.",
"custom_filtering_rules": "Omat suodatussäännöt",
@@ -627,7 +627,7 @@
"cache_optimistic": "Optimistinen välimuisti",
"cache_optimistic_desc": "Pakota AdGuard Home vastaamaan välimuistista vaikka tiedot olisivat vanhentuneet. Pyri samalla myös päivittämään tiedot.",
"filter_category_general": "Yleiset",
- "filter_category_security": "Turvallisuus",
+ "filter_category_security": "Tietoturva",
"filter_category_regional": "Alueelliset",
"filter_category_other": "Muut",
"filter_category_general_desc": "Listat, jotka estävät seurannan ja mainokset useimmilla laitteilla",
diff --git a/client/src/__locales/no.json b/client/src/__locales/no.json
index 3d3c91d6..7f31ba9b 100644
--- a/client/src/__locales/no.json
+++ b/client/src/__locales/no.json
@@ -282,6 +282,8 @@
"blocking_mode_null_ip": "Null IP: Svar med en 0-IP-adresse (0.0.0.0 for A; :: for AAAA)",
"blocking_mode_custom_ip": "Tilpasset IP: Svar med en manuelt valgt IP-adresse",
"theme_auto": "Auto",
+ "theme_light": "Lyst tema",
+ "theme_dark": "Mørkt tema",
"upstream_dns_client_desc": "Hvis dette feltet holdes tomt, vil AdGuard Home bruke tjenerne som er satt opp i <0>DNS-innstillingene0>.",
"tracker_source": "Sporerkilde",
"source_label": "Kilde",
diff --git a/client/src/__locales/pl.json b/client/src/__locales/pl.json
index 096fb012..0ed2c077 100644
--- a/client/src/__locales/pl.json
+++ b/client/src/__locales/pl.json
@@ -222,7 +222,7 @@
"all_lists_up_to_date_toast": "Wszystkie listy są już aktualne",
"updated_upstream_dns_toast": "Serwery nadrzędne zostały pomyślnie zapisane",
"dns_test_ok_toast": "Określone serwery DNS działają poprawnie",
- "dns_test_not_ok_toast": "Serwer \"{{key}}\": nie można go użyć, sprawdź, czy napisałeś go poprawnie",
+ "dns_test_not_ok_toast": "Serwer \"{{key}}\": nie może być użyte, sprawdź, czy zapisano go poprawnie",
"dns_test_warning_toast": "Upstream \"{{key}}\" nie odpowiada na zapytania testowe i może nie działać prawidłowo",
"unblock": "Odblokuj",
"block": "Zablokuj",
@@ -346,7 +346,7 @@
"install_devices_windows_list_2": "Przejdź do kategorii Sieć i Internet, a następnie do Centrum sieci i udostępniania.",
"install_devices_windows_list_3": "W lewym panelu kliknij \"Zmień ustawienia adaptera\".",
"install_devices_windows_list_4": "Kliknij prawym przyciskiem myszy aktywne połączenie i wybierz Właściwości.",
- "install_devices_windows_list_5": "Znajdź na liście \"Protokół internetowy w wersji 4 (TCP/IPv4)\" (lub w przypadku IPv6 \"Protokół internetowy w wersji 6 (TCP/IPv6)\"), zaznacz go i ponownie kliknij na Właściwości.",
+ "install_devices_windows_list_5": "Znajdź na liście \"Protokół internetowy w wersji 4 (TCP/IPv4)\" (lub w przypadku IPv6 \"Protokół internetowy w wersji 6 (TCP/IPv6)\"), zaznacz go i ponownie kliknij Właściwości.",
"install_devices_windows_list_6": "Wybierz opcję \"Użyj następujących adresów serwerów DNS\" i wprowadź adresy serwerów AdGuard Home.",
"install_devices_macos_list_1": "Kliknij ikonę Apple i przejdź do Preferencje systemowe.",
"install_devices_macos_list_2": "Kliknij Sieć.",
@@ -396,7 +396,7 @@
"encryption_issuer": "Zgłaszający",
"encryption_hostnames": "Nazwy hostów",
"encryption_reset": "Czy na pewno chcesz zresetować ustawienia szyfrowania?",
- "encryption_warning": "Uwaga!",
+ "encryption_warning": "Uwaga",
"topline_expiring_certificate": "Twój certyfikat SSL wkrótce wygaśnie. Zaktualizuj <0>Ustawienia szyfrowania0>.",
"topline_expired_certificate": "Twój certyfikat SSL wygasł. Zaktualizuj <0>Ustawienia szyfrowania0>.",
"form_error_port_range": "Wpisz numer portu z zakresu 80-65535",
@@ -542,7 +542,7 @@
"password_placeholder": "Wpisz hasło",
"sign_in": "Zaloguj się",
"sign_out": "Wyloguj się",
- "forgot_password": "Zapomniałeś hasła?",
+ "forgot_password": "Zapomniano hasła?",
"forgot_password_desc": "Wykonaj <0>te kroki0>, aby utworzyć nowe hasło do konta użytkownika.",
"location": "Lokalizacja",
"orgname": "Nazwa firmy",
diff --git a/client/src/__locales/pt-br.json b/client/src/__locales/pt-br.json
index c85234f4..c0650dd2 100644
--- a/client/src/__locales/pt-br.json
+++ b/client/src/__locales/pt-br.json
@@ -529,7 +529,7 @@
"ignore_domains": "Domínios ignorados (separados por nova linha)",
"ignore_domains_title": "Domínios ignorados",
"ignore_domains_desc_stats": "As consultas para esses domínios não são gravadas nas estatísticas",
- "ignore_domains_desc_query": "As consultas para esses domínios não são gravadas no log de consulta",
+ "ignore_domains_desc_query": "As consultas para esses domínios não são gravadas no registro de consulta",
"interval_hours": "{{count}} hora",
"interval_hours_plural": "{{count}} horas",
"filters_configuration": "Configuração de filtros",
diff --git a/client/src/__locales/pt-pt.json b/client/src/__locales/pt-pt.json
index 44eac323..18d2e176 100644
--- a/client/src/__locales/pt-pt.json
+++ b/client/src/__locales/pt-pt.json
@@ -529,7 +529,7 @@
"ignore_domains": "Domínios ignorados (separados por nova linha)",
"ignore_domains_title": "Domínios ignorados",
"ignore_domains_desc_stats": "As consultas para estes domínios não aparecem nas estatísticas",
- "ignore_domains_desc_query": "As consultas para estes domínios nãoaparecem no registo de consultas",
+ "ignore_domains_desc_query": "As consultas para estes domínios não aparecem no registo de consultas",
"interval_hours": "{{count}} hora",
"interval_hours_plural": "{{count}} horas",
"filters_configuration": "Definição dos filtros",
diff --git a/client/src/__locales/uk.json b/client/src/__locales/uk.json
index 9b8fae71..1d1718a0 100644
--- a/client/src/__locales/uk.json
+++ b/client/src/__locales/uk.json
@@ -167,6 +167,7 @@
"enabled_parental_toast": "«Батьківський контроль» увімкнено",
"disabled_safe_search_toast": "Безпечний пошук вимкнено",
"enabled_save_search_toast": "Безпечний пошук увімкнено",
+ "updated_save_search_toast": "Налаштування Безпечного пошуку оновлено",
"enabled_table_header": "Увімкнено",
"name_table_header": "Назва",
"list_url_table_header": "URL списку",
@@ -290,6 +291,8 @@
"rate_limit": "Обмеження швидкості",
"edns_enable": "Увімкнути відправку EDNS Client Subnet",
"edns_cs_desc": "Додавати параметр EDNS Client Subnet (ECS) до запитів до upstream-серверів, а також записувати в журнал значення, що надсилаються клієнтами.",
+ "edns_use_custom_ip": "Використання користувацької IP-адреси для EDNS",
+ "edns_use_custom_ip_desc": "Дозволити використовувати користувацьку IP-адресу для EDNS",
"rate_limit_desc": "Кількість запитів в секунду, які може робити один клієнт. Встановлене значення «0» означатиме необмежену кількість.",
"blocking_ipv4_desc": "IP-адреса, яку потрібно видати для заблокованого A запиту",
"blocking_ipv6_desc": "IP-адреса, яку потрібно видати для заблокованого АААА запиту",
@@ -523,6 +526,10 @@
"statistics_retention_confirm": "Ви впевнені, що хочете змінити тривалість статистики? Якщо зменшити значення інтервалу, деякі дані будуть втрачені",
"statistics_cleared": "Статистику успішно очищено",
"statistics_enable": "Увімкнути статистику",
+ "ignore_domains": "Ігноровані домени (по одному на рядок)",
+ "ignore_domains_title": "Ігноровані домени",
+ "ignore_domains_desc_stats": "Запити для цих доменів в статистику не пишуться",
+ "ignore_domains_desc_query": "Запити для цих доменів не записуються до журналу запитів",
"interval_hours": "{{count}} година",
"interval_hours_plural": "{{count}} годин(и)",
"filters_configuration": "Конфігурація фільтрів",
@@ -643,5 +650,29 @@
"confirm_dns_cache_clear": "Ви впевнені, що бажаєте очистити кеш DNS?",
"cache_cleared": "Кеш DNS успішно очищено",
"clear_cache": "Очистити кеш",
- "protection_section_label": "Захист"
+ "make_static": "Зробити статичним",
+ "theme_auto_desc": "Автоматична (на основі теми вашого пристрою)",
+ "theme_dark_desc": "Темна тема",
+ "theme_light_desc": "Світла тема",
+ "disable_for_seconds": "На {{count}} секунду",
+ "disable_for_seconds_plural": "На {{count}} секунд",
+ "disable_for_minutes": "На {{count}} хвилину",
+ "disable_for_minutes_plural": "На {{count}} хвилин",
+ "disable_for_hours": "На {{count}} годину",
+ "disable_for_hours_plural": "На {{count}} годин",
+ "disable_until_tomorrow": "До завтра",
+ "disable_notify_for_seconds": "Вимкнення захисту на {{count}} секунду",
+ "disable_notify_for_seconds_plural": "Вимкнення захисту на {{count}} секунд",
+ "disable_notify_for_minutes": "Вимкнення захисту на {{count}} хвилину",
+ "disable_notify_for_minutes_plural": "Вимкнення захисту на {{count}} хвилин",
+ "disable_notify_for_hours": "Вимкнення захисту на {{count}} годину",
+ "disable_notify_for_hours_plural": "Вимкнення захисту на {{count}} годин",
+ "disable_notify_until_tomorrow": "Відключення захисту до завтра",
+ "enable_protection_timer": "Захист буде ввімкнено о {{time}}",
+ "custom_retention_input": "Введіть час в годинах",
+ "custom_rotation_input": "Введіть час в годинах",
+ "protection_section_label": "Захист",
+ "log_and_stats_section_label": "Журнал запитів і статистика",
+ "ignore_query_log": "Ігнорувати цей клієнт у журналі запитів",
+ "ignore_statistics": "Ігноруйте цей клієнт в статистиці"
}
diff --git a/client/src/helpers/filters/filters.js b/client/src/helpers/filters/filters.js
index c8b2d044..a100d0bb 100644
--- a/client/src/helpers/filters/filters.js
+++ b/client/src/helpers/filters/filters.js
@@ -100,6 +100,12 @@ export default {
"homepage": "https://github.com/DandelionSprout/adfilt",
"source": "https://adguardteam.github.io/HostlistsRegistry/assets/filter_13.txt"
},
+ "POL_cert_polska_list_of_malicious_domains": {
+ "name": "POL: CERT Polska List of malicious domains",
+ "categoryId": "regional",
+ "homepage": "https://cert.pl/posts/2020/03/ostrzezenia_phishing/",
+ "source": "https://adguardteam.github.io/HostlistsRegistry/assets/filter_41.txt"
+ },
"POL_polish_filters_for_pi_hole": {
"name": "POL: Polish filters for Pi-hole",
"categoryId": "regional",
@@ -118,6 +124,12 @@ export default {
"homepage": "https://github.com/bkrucarci/turk-adlist",
"source": "https://adguardteam.github.io/HostlistsRegistry/assets/filter_26.txt"
},
+ "TUR_turkish_ad_hosts": {
+ "name": "TUR: Turkish Ad Hosts",
+ "categoryId": "regional",
+ "homepage": "https://github.com/symbuzzer/Turkish-Ad-Hosts",
+ "source": "https://adguardteam.github.io/HostlistsRegistry/assets/filter_40.txt"
+ },
"VNM_abpvn": {
"name": "VNM: ABPVN List",
"categoryId": "regional",
@@ -214,6 +226,12 @@ export default {
"homepage": "https://github.com/durablenapkin/scamblocklist",
"source": "https://adguardteam.github.io/HostlistsRegistry/assets/filter_10.txt"
},
+ "shadowwhisperers_malware_list": {
+ "name": "ShadowWhisperer's Malware List",
+ "categoryId": "security",
+ "homepage": "https://github.com/ShadowWhisperer/BlockLists",
+ "source": "https://adguardteam.github.io/HostlistsRegistry/assets/filter_42.txt"
+ },
"staklerware_indicators_list": {
"name": "Stalkerware Indicators List",
"categoryId": "security",
diff --git a/client/src/helpers/trackers/trackers.json b/client/src/helpers/trackers/trackers.json
index a503492e..5195fe35 100644
--- a/client/src/helpers/trackers/trackers.json
+++ b/client/src/helpers/trackers/trackers.json
@@ -1,5 +1,5 @@
{
- "timeUpdated": "2023-04-06T10:46:09.881Z",
+ "timeUpdated": "2023-06-01T00:12:12.660Z",
"categories": {
"0": "audio_video_player",
"1": "comments",
@@ -19526,6 +19526,13 @@
"companyId": "qualcomm",
"source": "AdGuard"
},
+ "qualcomm_location_service": {
+ "name": "Qualcomm Location Service",
+ "categoryId": 15,
+ "url": "https://www.qualcomm.com/site/privacy/services",
+ "companyId": "qualcomm",
+ "source": "AdGuard"
+ },
"recaptcha": {
"name": "reCAPTCHA",
"categoryId": 8,
@@ -19533,6 +19540,55 @@
"companyId": "google",
"source": "AdGuard"
},
+ "samsung": {
+ "name": "Samsung",
+ "categoryId": 8,
+ "url": "https://www.samsung.com/",
+ "companyId": "samsung",
+ "source": "AdGuard"
+ },
+ "samsungads": {
+ "name": "Samsung Ads",
+ "categoryId": 4,
+ "url": "https://www.samsung.com/business/samsungads/",
+ "companyId": "samsung",
+ "source": "AdGuard"
+ },
+ "samsungapps": {
+ "name": "Samsung Apps",
+ "categoryId": 101,
+ "url": "https://www.samsung.com/au/apps/",
+ "companyId": "samsung",
+ "source": "AdGuard"
+ },
+ "samsungmobile": {
+ "name": "Samsung Mobile",
+ "categoryId": 101,
+ "url": "https://www.samsung.com/mobile/",
+ "companyId": "samsung",
+ "source": "AdGuard"
+ },
+ "samsungpush": {
+ "name": "Samsung Push",
+ "categoryId": 8,
+ "url": null,
+ "companyId": "samsung",
+ "source": "AdGuard"
+ },
+ "samsungsds": {
+ "name": "Samsung SDS",
+ "categoryId": 10,
+ "url": "https://www.samsungsds.com/",
+ "companyId": "samsung",
+ "source": "AdGuard"
+ },
+ "samsungtv": {
+ "name": "Samsung TV",
+ "categoryId": 15,
+ "url": "https://www.samsung.com/au/tvs/",
+ "companyId": "samsung",
+ "source": "AdGuard"
+ },
"sectigo": {
"name": "Sectigo Limited",
"categoryId": 5,
@@ -19589,6 +19645,13 @@
"companyId": "telstra",
"source": "AdGuard"
},
+ "ubuntu": {
+ "name": "Ubuntu",
+ "categoryId": 8,
+ "url": "https://ubuntu.com/",
+ "companyId": "ubuntu",
+ "source": "AdGuard"
+ },
"unity_ads": {
"name": "Unity Ads",
"categoryId": 4,
@@ -19651,6 +19714,13 @@
"url": "https://www.3gpp.org/",
"companyId": "3gpp",
"source": "AdGuard"
+ },
+ "7plus": {
+ "name": "7plus",
+ "categoryId": 0,
+ "url": "https://7plus.com.au/",
+ "companyId": "7plus",
+ "source": "AdGuard"
}
},
"trackerDomains": {
@@ -19843,8 +19913,8 @@
"adfreestyle.pl": "adfreestyle",
"adfront.org": "adfront",
"adfrontiers.com": "adfrontiers",
- "adgear.com": "adgear",
- "adgrx.com": "adgear",
+ "adgear.com": "samsungads",
+ "adgrx.com": "samsungads",
"adgebra.co.in": "adgebra",
"adgenie.co.uk": "adgenie",
"ad.adgile.com": "adgile",
@@ -24056,6 +24126,10 @@
"safebrowsing.g.applimg.com": "apple",
"applvn.com": "applovin",
"applovin.com": "applovin",
+ "bitbucket.org": "atlassian.net",
+ "jira.com": "atlassian.net",
+ "ss-inf.net": "atlassian.net",
+ "stspg-customer.com": "statuspage.io",
"blob.core.windows.net": "azure_blob_storage",
"azure.com": "azure",
"trafficmanager.net": "azure",
@@ -24063,6 +24137,21 @@
"mobileapptracking.com": "branch",
"bttn.io": "button",
"cloudflare-dns.com": "cloudflare",
+ "cloudflare-dm-cmpimg.com": "cloudflare",
+ "cloudflare-ipfs.com": "cloudflare",
+ "cloudflare-quic.com": "cloudflare",
+ "cloudflare-terms-of-service-abuse.com": "cloudflare",
+ "cloudflare.tv": "cloudflare",
+ "cloudflareaccess.com": "cloudflare",
+ "cloudflareclient.com": "cloudflare",
+ "cloudflareinsights.com": "cloudflare",
+ "cloudflareok.com": "cloudflare",
+ "cloudflareportal.com": "cloudflare",
+ "cloudflareresolve.com": "cloudflare",
+ "cloudflaressl.com": "cloudflare",
+ "cloudflarestatus.com": "cloudflare",
+ "pacloudflare.com": "cloudflare",
+ "sn-cloudflare.com": "cloudflare",
"crashlytics.com": "crashlytics",
"phicdn.net": "digicert_trust_seal",
"domain.glass": "domainglass",
@@ -24092,6 +24181,9 @@
"qy.net": "iqiyi",
"iqiyi.com": "iqiyi",
"iq.com": "iqiyi",
+ "ironsrc.com": "ironsource",
+ "ironsrc.net": "ironsource",
+ "supersonicads.com": "ironsource",
"karambasecurity.com": "karambasecurity",
"kik.com": "kik",
"apikik.com": "kik",
@@ -24121,6 +24213,23 @@
"mozilla.com": "mozilla",
"mozilla.net": "mozilla",
"mozilla.org": "mozilla",
+ "flxvpn.net": "netflix",
+ "netflix.ca": "netflix",
+ "netflix.com.au": "netflix",
+ "netflix.net": "netflix",
+ "netflixdnstest1.com": "netflix",
+ "netflixdnstest10.com": "netflix",
+ "netflixdnstest2.com": "netflix",
+ "netflixdnstest3.com": "netflix",
+ "netflixdnstest4.com": "netflix",
+ "netflixdnstest5.com": "netflix",
+ "netflixdnstest6.com": "netflix",
+ "netflixdnstest7.com": "netflix",
+ "netflixdnstest8.com": "netflix",
+ "netflixdnstest9.com": "netflix",
+ "netflixinvestor.com": "netflix",
+ "netflixstudios.com": "netflix",
+ "netflixtechblog.com": "netflix",
"nflximg.com": "netflix",
"netify.ai": "netify",
"nab.com": "nab",
@@ -24144,9 +24253,69 @@
"oztam.com.au": "oztam",
"plex.tv": "plex",
"plex.direct": "plex",
- "xtracloud.net": "qualcomm",
"qualcomm.com": "qualcomm",
+ "gpsonextra.net": "qualcomm_location_service",
+ "izatcloud.net": "qualcomm_location_service",
+ "xtracloud.net": "qualcomm_location_service",
"recaptcha.net": "recaptcha",
+ "samsungacr.com": "samsungads",
+ "samsungadhub.com": "samsungads",
+ "samsungads.com": "samsungads",
+ "samsungtifa.com": "samsungads",
+ "game-mode.net": "samsung",
+ "gos-gsp.io": "samsung",
+ "lldns.net": "samsung",
+ "pavv.co.kr": "samsung",
+ "remotesamsung.com": "samsung",
+ "samsung-gamelauncher.com": "samsung",
+ "samsung.co.kr": "samsung",
+ "samsung.com": "samsung",
+ "samsung.com.cn": "samsung",
+ "samsungcloud.com": "samsung",
+ "samsungcloudcdn.com": "samsung",
+ "samsungcloudprint.com": "samsung",
+ "samsungcloudsolution.com": "samsung",
+ "samsungcloudsolution.net": "samsung",
+ "samsungelectronics.com": "samsung",
+ "samsunghealth.com": "samsung",
+ "samsungiotcloud.com": "samsung",
+ "samsungknox.com": "samsung",
+ "samsungnyc.com": "samsung",
+ "samsungosp.com": "samsung",
+ "samsungotn.net": "samsung",
+ "samsungpositioning.com": "samsung",
+ "samsungqbe.com": "samsung",
+ "samsungrm.net": "samsung",
+ "samsungrs.com": "samsung",
+ "samsungsemi.com": "samsung",
+ "samsungsetup.com": "samsung",
+ "samsungusa.com": "samsung",
+ "secb2b.com": "samsung",
+ "smartthings.com": "samsung",
+ "ospserver.net": "samsungmobile",
+ "samsungdms.net": "samsungmobile",
+ "samsungmax.com": "samsungmobile",
+ "samsungmobile.com": "samsungmobile",
+ "secmobilesvc.com": "samsungmobile",
+ "internetat.tv": "samsungtv",
+ "samsungcloud.tv": "samsungtv",
+ "samsungsds.com": "samsungsds",
+ "push.samsungosp.com": "samsungpush",
+ "pushmessage.samsung.com": "samsungpush",
+ "scs.samsungqbe.com": "samsungpush",
+ "ssp.samsung.com": "samsungpush",
+ "aibixby.com": "samsungapps",
+ "findmymobile.samsung.com": "samsungapps",
+ "samsapps.cust.lldns.net": "samsungapps",
+ "samsung-omc.com": "samsungapps",
+ "samsungapps.com": "samsungapps",
+ "samsungdiroute.net": "samsungapps",
+ "samsungdive.com": "samsungapps",
+ "samsungdm.com": "samsungapps",
+ "samsungdmroute.com": "samsungapps",
+ "samsungmdec.com": "samsungapps",
+ "samsungvisioncloud.com": "samsungapps",
+ "sbixby.com": "samsungapps",
"sectigo.com": "sectigo",
"showrss.info": "showrss",
"similarweb.io": "similarweb",
@@ -24171,6 +24340,13 @@
"telstra.com.au": "telstra",
"telstra.com": "telstra",
"usertrust.com": "trustlogo",
+ "canonical.com": "ubuntu",
+ "launchpad.net": "ubuntu",
+ "launchpadcontent.net": "ubuntu",
+ "snapcraft.io": "ubuntu",
+ "snapcraftcontent.com": "ubuntu",
+ "ubuntu.com": "ubuntu",
+ "ubuntucompanyservices.co.za": "ubuntu",
"unityads.unity3d.com": "unity_ads",
"exp-tas.com": "vscode",
"vscode-unpkg.net": "vscode",
@@ -24190,6 +24366,7 @@
"yandex.kz": "yandex",
"appmetrica.yandex.com": "yandex_appmetrica",
"3gppnetwork.org": "3gpp",
- "3gpp.org": "3gpp"
+ "3gpp.org": "3gpp",
+ "swm.digital": "7plus"
}
}
diff --git a/docker/dns-bind.awk b/docker/dns-bind.awk
index 4173614c..abb5747c 100644
--- a/docker/dns-bind.awk
+++ b/docker/dns-bind.awk
@@ -7,11 +7,10 @@
addrs[$2] = true
prev_line = FNR
- if ($2 == "0.0.0.0" || $2 == "::") {
- delete addrs
- addrs["localhost"] = true
-
+ if ($2 == "0.0.0.0" || $2 == "'::'") {
# Drop all the other addresses.
+ delete addrs
+ addrs[""] = true
prev_line = -1
}
}
diff --git a/docker/healthcheck.sh b/docker/healthcheck.sh
index 881bbd60..a50de230 100755
--- a/docker/healthcheck.sh
+++ b/docker/healthcheck.sh
@@ -61,8 +61,11 @@ then
error_exit "no DNS bindings could be retrieved from $filename"
fi
+first_dns="$( echo "$dns_hosts" | head -n 1 )"
+readonly first_dns
+
# TODO(e.burkov): Deal with 0 port.
-case "$( echo "$dns_hosts" | head -n 1 )"
+case "$first_dns"
in
(*':0')
error_exit '0 in DNS port is not supported by healthcheck'
@@ -82,8 +85,23 @@ esac
# See https://github.com/AdguardTeam/AdGuardHome/issues/5642.
wget --no-check-certificate "$web_url" -O /dev/null -q || exit 1
-echo "$dns_hosts" | while read -r host
-do
- nslookup -type=a healthcheck.adguardhome.test. "$host" > /dev/null ||\
+test_fqdn="healthcheck.adguardhome.test."
+readonly test_fqdn
+
+# The awk script currently returns only the port, prefixed with a colon, in
+# case of an unspecified address.
+case "$first_dns"
+in
+(':'*)
+ nslookup -type=a "$test_fqdn" "127.0.0.1${first_dns}" > /dev/null ||\
+ nslookup -type=a "$test_fqdn" "[::1]${first_dns}" > /dev/null ||\
error_exit "nslookup failed for $host"
-done
+ ;;
+(*)
+ echo "$dns_hosts" | while read -r host
+ do
+ nslookup -type=a "$test_fqdn" "$host" > /dev/null ||\
+ error_exit "nslookup failed for $host"
+ done
+ ;;
+esac
diff --git a/go.mod b/go.mod
index 911624bf..1776b59b 100644
--- a/go.mod
+++ b/go.mod
@@ -3,7 +3,7 @@ module github.com/AdguardTeam/AdGuardHome
go 1.19
require (
- github.com/AdguardTeam/dnsproxy v0.48.3
+ github.com/AdguardTeam/dnsproxy v0.50.0
github.com/AdguardTeam/golibs v0.13.2
github.com/AdguardTeam/urlfilter v0.16.1
github.com/NYTimes/gziphandler v1.1.1
@@ -16,24 +16,24 @@ require (
github.com/google/gopacket v1.1.19
github.com/google/renameio v1.0.1
github.com/google/uuid v1.3.0
- github.com/insomniacslk/dhcp v0.0.0-20221215072855-de60144f33f8
+ github.com/insomniacslk/dhcp v0.0.0-20230516061539-49801966e6cb
github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86
github.com/kardianos/service v1.2.2
github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118
- github.com/mdlayher/netlink v1.7.1
- github.com/mdlayher/packet v1.1.1
+ github.com/mdlayher/netlink v1.7.2
+ github.com/mdlayher/packet v1.1.2
// TODO(a.garipov): This package is deprecated; find a new one or use our
// own code for that. Perhaps, use gopacket.
github.com/mdlayher/raw v0.1.0
- github.com/miekg/dns v1.1.53
- github.com/quic-go/quic-go v0.33.0
+ github.com/miekg/dns v1.1.54
+ github.com/quic-go/quic-go v0.35.1
github.com/stretchr/testify v1.8.2
github.com/ti-mo/netfilter v0.5.0
go.etcd.io/bbolt v1.3.7
- golang.org/x/crypto v0.8.0
- golang.org/x/exp v0.0.0-20230321023759-10a507213a29
- golang.org/x/net v0.9.0
- golang.org/x/sys v0.7.0
+ golang.org/x/crypto v0.9.0
+ golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1
+ golang.org/x/net v0.10.0
+ golang.org/x/sys v0.8.0
gopkg.in/natefinch/lumberjack.v2 v2.2.1
gopkg.in/yaml.v3 v3.0.1
howett.net/plist v1.0.0
@@ -48,9 +48,9 @@ require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/golang/mock v1.6.0 // indirect
- github.com/google/pprof v0.0.0-20230406165453-00490a63f317 // indirect
- github.com/mdlayher/socket v0.4.0 // indirect
- github.com/onsi/ginkgo/v2 v2.9.2 // indirect
+ github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 // indirect
+ github.com/mdlayher/socket v0.4.1 // indirect
+ github.com/onsi/ginkgo/v2 v2.10.0 // indirect
github.com/patrickmn/go-cache v2.1.0+incompatible // indirect
github.com/pierrec/lz4/v4 v4.1.17 // indirect
github.com/pkg/errors v0.9.1 // indirect
@@ -60,7 +60,7 @@ require (
github.com/quic-go/qtls-go1-20 v0.2.2 // indirect
github.com/u-root/uio v0.0.0-20230305220412-3e8cd9d6bf63 // indirect
golang.org/x/mod v0.10.0 // indirect
- golang.org/x/sync v0.1.0 // indirect
+ golang.org/x/sync v0.2.0 // indirect
golang.org/x/text v0.9.0 // indirect
- golang.org/x/tools v0.8.0 // indirect
+ golang.org/x/tools v0.9.3 // indirect
)
diff --git a/go.sum b/go.sum
index 6442a7db..f454ecb1 100644
--- a/go.sum
+++ b/go.sum
@@ -1,5 +1,5 @@
-github.com/AdguardTeam/dnsproxy v0.48.3 h1:h9xgDSmd1MqsPFNApyaPVXolmSTtzOWOcfWvPeDEP6s=
-github.com/AdguardTeam/dnsproxy v0.48.3/go.mod h1:Y7g7jRTd/u7+KJ/QvnGI2PCE8vnisp6EsW47/Sz0DZw=
+github.com/AdguardTeam/dnsproxy v0.50.0 h1:gqImxUMBVS8VQmGdXw0U7MjJNVzXkYaZ9NM5TKl3JBU=
+github.com/AdguardTeam/dnsproxy v0.50.0/go.mod h1:CQhZTkqC8X0ID6glrtyaxgqRRdiYfn1gJulC1cZ5Dn8=
github.com/AdguardTeam/golibs v0.4.0/go.mod h1:skKsDKIBB7kkFflLJBpfGX+G8QFTx0WKUzB6TIgtUj4=
github.com/AdguardTeam/golibs v0.10.4/go.mod h1:rSfQRGHIdgfxriDDNgNJ7HmE5zRoURq8R+VdR81Zuzw=
github.com/AdguardTeam/golibs v0.13.2 h1:BPASsyQKmb+b8VnvsNOHp7bKfcZl9Z+Z2UhPjOiupSc=
@@ -31,10 +31,9 @@ github.com/digineo/go-ipset/v2 v2.2.1 h1:k6skY+0fMqeUjjeWO/m5OuWPSZUAn7AucHMnQ1M
github.com/digineo/go-ipset/v2 v2.2.1/go.mod h1:wBsNzJlZlABHUITkesrggFnZQtgW5wkqw1uo8Qxe0VU=
github.com/dimfeld/httptreemux/v5 v5.5.0 h1:p8jkiMrCuZ0CmhwYLcbNbl7DDo21fozhKHQ2PccwOFQ=
github.com/dimfeld/httptreemux/v5 v5.5.0/go.mod h1:QeEylH57C0v3VO0tkKraVz9oD3Uu93CKPnTLbsidvSw=
-github.com/fanliao/go-promise v0.0.0-20141029170127-1890db352a72/go.mod h1:PjfxuH4FZdUyfMdtBio2lsRr1AKEaVPwelzuHuh8Lqc=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
+github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
github.com/go-ole/go-ole v1.2.5 h1:t4MGB5xEDZvXI+0rMjjsfBsD7yAgp/s9ZDkL1JndXwY=
github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-ping/ping v1.1.0 h1:3MCGhVX4fyEUuhsfwPrsEdQw6xspHkv5zHsiSoDFZYw=
@@ -45,72 +44,54 @@ github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
-github.com/google/pprof v0.0.0-20230406165453-00490a63f317 h1:hFhpt7CTmR3DX+b4R19ydQFtofxT0Sv3QsKNMVQYTMQ=
-github.com/google/pprof v0.0.0-20230406165453-00490a63f317/go.mod h1:79YE0hCXdHag9sBkw2o+N/YnZtTkXi0UT9Nnixa5eYk=
+github.com/google/pprof v0.0.0-20230602150820-91b7bce49751 h1:hR7/MlvK23p6+lIw9SN1TigNLn9ZnF3W4SYRKq2gAHs=
+github.com/google/pprof v0.0.0-20230602150820-91b7bce49751/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA=
github.com/google/renameio v1.0.1 h1:Lh/jXZmvZxb0BBeSY5VKEfidcbcbenKjZFzM/q0fSeU=
github.com/google/renameio v1.0.1/go.mod h1:t/HQoYBZSsWSNK35C6CO/TpPLDVWvxOHboWUAweKUpk=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/hugelgupf/socketpair v0.0.0-20190730060125-05d35a94e714 h1:/jC7qQFrv8CrSJVmaolDVOxTfS9kc36uB6H40kdbQq8=
-github.com/hugelgupf/socketpair v0.0.0-20190730060125-05d35a94e714/go.mod h1:2Goc3h8EklBH5mspfHFxBnEoURQCGzQQH1ga9Myjvis=
-github.com/insomniacslk/dhcp v0.0.0-20221215072855-de60144f33f8 h1:Z72DOke2yOK0Ms4Z2LK1E1OrRJXOxSj5DllTz2FYTRg=
-github.com/insomniacslk/dhcp v0.0.0-20221215072855-de60144f33f8/go.mod h1:m5WMe03WCvWcXjRnhvaAbAAXdCnu20J5P+mmH44ZzpE=
+github.com/insomniacslk/dhcp v0.0.0-20230516061539-49801966e6cb h1:6fDKEAXwe3rsfS4khW3EZ8kEqmSiV9szhMPcDrD+Y7Q=
+github.com/insomniacslk/dhcp v0.0.0-20230516061539-49801966e6cb/go.mod h1:7474bZ1YNCvarT6WFKie4kEET6J0KYRDC4XJqqXzQW4=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/josharian/native v1.0.0/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
github.com/josharian/native v1.0.1-0.20221213033349-c1e37c09b531/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86 h1:elKwZS1OcdQ0WwEDBeqxKwb7WB62QX8bvZ/FJnVXIfk=
github.com/josharian/native v1.1.1-0.20230202152459-5c7d0dd6ab86/go.mod h1:aFAMtuldEgx/4q7iSGazk22+IcgvtiC+HIimFO9XlS8=
-github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw=
-github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ=
-github.com/jsimonetti/rtnetlink v0.0.0-20201009170750-9c6f07d100c1/go.mod h1:hqoO/u39cqLeBLebZ8fWdE96O7FxrAsRYhnVOdgHxok=
-github.com/jsimonetti/rtnetlink v0.0.0-20201110080708-d2c240429e6c/go.mod h1:huN4d1phzjhlOsNIjFsw2SVRbwIHj3fJDMEU2SDPTmg=
-github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kardianos/service v1.2.2 h1:ZvePhAHfvo0A7Mftk/tEzqEZ7Q4lgnR8sGz4xu1YX60=
github.com/kardianos/service v1.2.2/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/mdlayher/ethernet v0.0.0-20190606142754-0394541c37b7/go.mod h1:U6ZQobyTjI/tJyq2HG+i/dfSoFUt8/aZCM+GKtmFk/Y=
github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118 h1:2oDp6OOhLxQ9JBoUuysVz9UZ9uI6oLUbvAZu0x8o+vE=
github.com/mdlayher/ethernet v0.0.0-20220221185849-529eae5b6118/go.mod h1:ZFUnHIVchZ9lJoWoEGUg8Q3M4U8aNNWA3CVSUTkW4og=
github.com/mdlayher/netlink v0.0.0-20190313131330-258ea9dff42c/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA=
-github.com/mdlayher/netlink v0.0.0-20190409211403-11939a169225/go.mod h1:eQB3mZE4aiYnlUsyGGCOpPETfdQq4Jhsgf1fk3cwQaA=
-github.com/mdlayher/netlink v1.0.0/go.mod h1:KxeJAFOFLG6AjpyDkQ/iIhxygIUKD+vcwqcnu43w/+M=
-github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcKp9uZHgmY=
-github.com/mdlayher/netlink v1.1.1/go.mod h1:WTYpFb/WTvlRJAyKhZL5/uy69TDDpHHu2VZmb2XgV7o=
-github.com/mdlayher/netlink v1.7.1 h1:FdUaT/e33HjEXagwELR8R3/KL1Fq5x3G5jgHLp/BTmg=
-github.com/mdlayher/netlink v1.7.1/go.mod h1:nKO5CSjE/DJjVhk/TNp6vCE1ktVxEA8VEh8drhZzxsQ=
+github.com/mdlayher/netlink v1.7.2 h1:/UtM3ofJap7Vl4QWCPDGXY8d3GIY2UGSDbK+QWmY8/g=
+github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU+ZGLfQSw=
github.com/mdlayher/packet v1.0.0/go.mod h1:eE7/ctqDhoiRhQ44ko5JZU2zxB88g+JH/6jmnjzPjOU=
-github.com/mdlayher/packet v1.1.1 h1:7Fv4OEMYqPl7//uBm04VgPpnSNi8fbBZznppgh6WMr8=
-github.com/mdlayher/packet v1.1.1/go.mod h1:DRvYY5mH4M4lUqAnMg04E60U4fjUKMZ/4g2cHElZkKo=
-github.com/mdlayher/raw v0.0.0-20190606142536-fef19f00fc18/go.mod h1:7EpbotpCmVZcu+KCX4g9WaRNuu11uyhiW7+Le1dKawg=
-github.com/mdlayher/raw v0.0.0-20191009151244-50f2db8cc065/go.mod h1:7EpbotpCmVZcu+KCX4g9WaRNuu11uyhiW7+Le1dKawg=
+github.com/mdlayher/packet v1.1.2 h1:3Up1NG6LZrsgDVn6X4L9Ge/iyRyxFEFD9o6Pr3Q1nQY=
+github.com/mdlayher/packet v1.1.2/go.mod h1:GEu1+n9sG5VtiRE4SydOmX5GTwyyYlteZiFU+x0kew4=
github.com/mdlayher/raw v0.1.0 h1:K4PFMVy+AFsp0Zdlrts7yNhxc/uXoPVHi9RzRvtZF2Y=
github.com/mdlayher/raw v0.1.0/go.mod h1:yXnxvs6c0XoF/aK52/H5PjsVHmWBCFfZUfoh/Y5s9Sg=
github.com/mdlayher/socket v0.2.1/go.mod h1:QLlNPkFR88mRUNQIzRBMfXxwKal8H7u1h3bL1CV+f0E=
-github.com/mdlayher/socket v0.4.0 h1:280wsy40IC9M9q1uPGcLBwXpcTQDtoGwVt+BNoITxIw=
-github.com/mdlayher/socket v0.4.0/go.mod h1:xxFqz5GRCUN3UEOm9CZqEJsAbe1C8OwSK46NlmWuVoc=
+github.com/mdlayher/socket v0.4.1 h1:eM9y2/jlbs1M615oshPQOHZzj6R6wMT7bX5NPiQvn2U=
+github.com/mdlayher/socket v0.4.1/go.mod h1:cAqeGjoufqdxWkD7DkpyS+wcefOtmu5OQ8KuoJGIReA=
github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
-github.com/miekg/dns v1.1.53 h1:ZBkuHr5dxHtB1caEOlZTLPo7D3L3TWckgUUs/RHfDxw=
-github.com/miekg/dns v1.1.53/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
+github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI=
+github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
-github.com/onsi/ginkgo/v2 v2.9.2 h1:BA2GMJOtfGAfagzYtrAlufIP0lq6QERkFmHLMLPwFSU=
-github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts=
-github.com/onsi/gomega v1.27.4 h1:Z2AnStgsdSayCMDiCU42qIz+HLqEPcgiOCXjAU/w+8E=
+github.com/onsi/ginkgo/v2 v2.10.0 h1:sfUl4qgLdvkChZrWCYndY2EAu9BRIw1YphNAzy1VNWs=
+github.com/onsi/ginkgo/v2 v2.10.0/go.mod h1:UDQOh5wbQUlMnkLfVaIUMtQ1Vus92oM+P2JX1aulgcE=
+github.com/onsi/gomega v1.27.7 h1:fVih9JD6ogIiHUN6ePK7HJidyEDpWGVB5mzM7cWNXoU=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/pierrec/lz4/v4 v4.1.14/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
@@ -127,12 +108,10 @@ github.com/quic-go/qtls-go1-19 v0.3.2 h1:tFxjCFcTQzK+oMxG6Zcvp4Dq8dx4yD3dDiIiyc8
github.com/quic-go/qtls-go1-19 v0.3.2/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI=
github.com/quic-go/qtls-go1-20 v0.2.2 h1:WLOPx6OY/hxtTxKV1Zrq20FtXtDEkeY00CGQm8GEa3E=
github.com/quic-go/qtls-go1-20 v0.2.2/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM=
-github.com/quic-go/quic-go v0.33.0 h1:ItNoTDN/Fm/zBlq769lLJc8ECe9gYaW40veHCCco7y0=
-github.com/quic-go/quic-go v0.33.0/go.mod h1:YMuhaAV9/jIu0XclDXwZPAsP/2Kgr5yMYhe9oxhhOFA=
+github.com/quic-go/quic-go v0.35.1 h1:b0kzj6b/cQAf05cT0CkQubHM31wiA+xH3IBkxP62poo=
+github.com/quic-go/quic-go v0.35.1/go.mod h1:+4CVgVppm0FNjpG3UcX8Joi/frKOH7/ciD5yGcwOO1g=
github.com/shirou/gopsutil/v3 v3.21.8 h1:nKct+uP0TV8DjjNiHanKf8SAuub+GNsbrOtM9Nl9biA=
github.com/shirou/gopsutil/v3 v3.21.8/go.mod h1:YWp/H8Qs5fVmf17v7JNZzA0mPJ+mS2e9JdiUF9LlKzQ=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
@@ -152,7 +131,6 @@ github.com/tklauser/go-sysconf v0.3.9 h1:JeUVdAOWhhxVcU6Eqr/ATFHgXk/mmiItdKeJPev
github.com/tklauser/go-sysconf v0.3.9/go.mod h1:11DU/5sG7UexIrp/O6g35hrWzu0JxlwQ3LSFUzyeuhs=
github.com/tklauser/numcpus v0.3.0 h1:ILuRUQBtssgnxw0XXIjKUC56fgnOrFoQQ/4+DeU2biQ=
github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8=
-github.com/u-root/uio v0.0.0-20221213070652-c3537552635f/go.mod h1:IogEAUBXDEwX7oR/BMmCctShYs80ql4hF0ySdzGxf7E=
github.com/u-root/uio v0.0.0-20230305220412-3e8cd9d6bf63 h1:YcojQL98T/OO+rybuzn2+5KrD5dBwXIvYBvQ2cD3Avg=
github.com/u-root/uio v0.0.0-20230305220412-3e8cd9d6bf63/go.mod h1:eLL9Nub3yfAho7qB0MzZizFhTU2QkLeoVsWdHtDW264=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
@@ -160,11 +138,10 @@ go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ=
-golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
-golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug=
-golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
+golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
+golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
+golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1 h1:k/i9J1pBpvlfR+9QsetwPyERsqu1GIbi967PQMq3Ivc=
+golang.org/x/exp v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -172,40 +149,25 @@ golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190419010253-1f3472d942ba/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20201010224723-4f7140c49acb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210929193557-e81a3d93ecf6/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
-golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
+golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
+golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
+golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190322080309-f49334f85ddc/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190418153312-f0ce4c0180be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606122018-79a91cf218c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -219,8 +181,8 @@ golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220622161953-175b2fd9d664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.1-0.20230131160137-e7d7f63158de/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
-golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
@@ -230,12 +192,11 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
-golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
+golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM=
+golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/internal/aghhttp/aghhttp.go b/internal/aghhttp/aghhttp.go
index b5878f92..6cb2c670 100644
--- a/internal/aghhttp/aghhttp.go
+++ b/internal/aghhttp/aghhttp.go
@@ -72,8 +72,8 @@ func WriteJSONResponse(w http.ResponseWriter, r *http.Request, resp any) (err er
// WriteJSONResponseCode is like [WriteJSONResponse] but adds the ability to
// redefine the status code.
func WriteJSONResponseCode(w http.ResponseWriter, r *http.Request, code int, resp any) (err error) {
- w.WriteHeader(code)
w.Header().Set(httphdr.ContentType, HdrValApplicationJSON)
+ w.WriteHeader(code)
err = json.NewEncoder(w).Encode(resp)
if err != nil {
Error(r, w, http.StatusInternalServerError, "encoding resp: %s", err)
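The `WriteJSONResponseCode` hunk above swaps the `w.Header().Set(...)` and `w.WriteHeader(code)` calls. The order matters because `net/http` fixes the response headers at the moment the status line is written, so a `Content-Type` set afterwards never reaches the client. A small self-contained demonstration of that behaviour (not AdGuard Home code) using `httptest`:

```go
// Demonstrates why headers must be set before WriteHeader: net/http snapshots
// the header map when the status code is written, so later Set calls have no
// effect on the response actually sent.
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

func brokenHandler(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
	// Too late: the headers were already flushed with the status line.
	w.Header().Set("Content-Type", "application/json")
}

func fixedHandler(w http.ResponseWriter, r *http.Request) {
	// Correct order: set headers first, then write the status code.
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
}

func main() {
	for _, h := range []http.HandlerFunc{brokenHandler, fixedHandler} {
		rec := httptest.NewRecorder()
		h(rec, httptest.NewRequest(http.MethodGet, "/", nil))

		// Prints "" for the broken handler and "application/json" for the
		// fixed one.
		fmt.Printf("Content-Type: %q\n", rec.Result().Header.Get("Content-Type"))
	}
}
```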
diff --git a/internal/aghnet/dhcp_unix.go b/internal/aghnet/dhcp_unix.go
index 16c3c87a..cb44e29d 100644
--- a/internal/aghnet/dhcp_unix.go
+++ b/internal/aghnet/dhcp_unix.go
@@ -304,7 +304,7 @@ func tryConn6(req *dhcpv6.Message, c net.PacketConn) (ok, next bool, err error)
if !(response.Type() == dhcpv6.MessageTypeAdvertise &&
msg.TransactionID == req.TransactionID &&
rcid != nil &&
- cid.Equal(*rcid)) {
+ cid.Equal(rcid)) {
log.Debug("dhcpv6: received message from server doesn't match our request")
diff --git a/internal/aghos/service.go b/internal/aghos/service.go
new file mode 100644
index 00000000..4be05dd2
--- /dev/null
+++ b/internal/aghos/service.go
@@ -0,0 +1,6 @@
+package aghos
+
+// PreCheckActionStart performs the service start action pre-check.
+func PreCheckActionStart() (err error) {
+ return preCheckActionStart()
+}
diff --git a/internal/aghos/service_darwin.go b/internal/aghos/service_darwin.go
new file mode 100644
index 00000000..b87f95ed
--- /dev/null
+++ b/internal/aghos/service_darwin.go
@@ -0,0 +1,32 @@
+//go:build darwin
+
+package aghos
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/AdguardTeam/golibs/log"
+)
+
+// preCheckActionStart performs the service start action pre-check. It warns
+// the user that the service should be installed in the /Applications directory.
+func preCheckActionStart() (err error) {
+ exe, err := os.Executable()
+ if err != nil {
+ return fmt.Errorf("getting executable path: %v", err)
+ }
+
+ exe, err = filepath.EvalSymlinks(exe)
+ if err != nil {
+ return fmt.Errorf("evaluating executable symlinks: %v", err)
+ }
+
+ if !strings.HasPrefix(exe, "/Applications/") {
+ log.Info("warning: service must be started from within the /Applications directory")
+ }
+
+ return err
+}
diff --git a/internal/aghos/service_others.go b/internal/aghos/service_others.go
new file mode 100644
index 00000000..0869f53f
--- /dev/null
+++ b/internal/aghos/service_others.go
@@ -0,0 +1,8 @@
+//go:build !darwin
+
+package aghos
+
+// preCheckActionStart performs the service start action pre-check.
+func preCheckActionStart() (err error) {
+ return nil
+}
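The two new `aghos` files above expose the platform-specific check through the single exported `PreCheckActionStart`, so callers can run it unconditionally before the service `start` action. A hypothetical call site follows; the enclosing package, function name, and logging are illustrative only and are not the project's actual control flow — only `aghos.PreCheckActionStart` and the `kardianos/service` API are taken from the diff.

```go
// Hypothetical caller inside the AdGuardHome module; handleServiceStart is a
// placeholder name for illustration.
package home

import (
	"github.com/AdguardTeam/AdGuardHome/internal/aghos"
	"github.com/AdguardTeam/golibs/log"
	"github.com/kardianos/service"
)

// handleServiceStart runs the pre-check and then starts the installed service.
func handleServiceStart(svc service.Service) (err error) {
	// On darwin this logs a warning when the binary does not live under
	// /Applications/; on all other platforms it is a no-op.
	if err = aghos.PreCheckActionStart(); err != nil {
		return err
	}

	log.Info("starting service")

	return svc.Start()
}
```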
diff --git a/internal/dhcpd/README.md b/internal/dhcpd/README.md
index fb2bdc8d..5c692e04 100644
--- a/internal/dhcpd/README.md
+++ b/internal/dhcpd/README.md
@@ -1,46 +1,60 @@
-# DHCP server
+ # Testing DHCP Server
Contents:
-* [Test setup with Virtual Box](#vbox)
+ * [Test setup with Virtual Box](#vbox)
+ * [Quick test with DHCPTest](#dhcptest)
-
-## Test setup with Virtual Box
+## Test setup with Virtual Box
-To set up a test environment for DHCP server you need:
+ ### Prerequisites
-* Linux host machine
-* Virtual Box
-* Virtual machine (guest OS doesn't matter)
+To set up a test environment for the DHCP server you will need:
-### Configure client
+ * Linux host machine running AdGuard Home (may be a VM).
+ * Virtual Box.
+ * Virtual machine (guest OS doesn't matter).
-1. Install Virtual Box and run the following command to create a Host-Only network:
+ ### Configure Virtual Box
- $ VBoxManage hostonlyif create
+ 1. Install Virtual Box and run the following command to create a Host-Only
+ network:
- You can check its status by `ip a` command.
+ ```sh
+ $ VBoxManage hostonlyif create
+ ```
+
+    You can check its status with the `ip a` command.
- You can also set up Host-Only network using Virtual Box menu:
+    You can also set up a Host-Only network using the Virtual Box menu:
+
+ ```
+ File -> Host Network Manager...
+ ```
- File -> Host Network Manager...
+ 2. Create your virtual machine and set up its network:
-2. Create your virtual machine and set up its network:
+ ```
+ VM Settings -> Network -> Host-only Adapter
+ ```
- VM Settings -> Network -> Host-only Adapter
+ 3. Start your VM, install an OS. Configure your network interface to use
+    DHCP, and the OS should ask for an IP address from our DHCP server.
-3. Start your VM, install an OS. Configure your network interface to use DHCP and the OS should ask for a IP address from our DHCP server.
+ 4. To see the current IP addresses on the client OS you can use the `ip a` command on
+ Linux or `ipconfig` on Windows.
-4. To see the current IP address on client OS you can use `ip a` command on Linux or `ipconfig` on Windows.
+ 5. To force the client OS to request an IP from the DHCP server again, you can
+ use `dhclient` on Linux or `ipconfig /release` on Windows.
-5. To force the client OS to request an IP from DHCP server again, you can use `dhclient` on Linux or `ipconfig /release` on Windows.
+ ### Configure server
-### Configure server
+ 1. Edit the server configuration file `AdGuardHome.yaml`, for example:
-1. Edit server configuration file 'AdGuardHome.yaml', for example:
-
- dhcp:
+ ```yaml
+ dhcp:
enabled: true
interface_name: vboxnet0
+ local_domain_name: lan
dhcpv4:
gateway_ip: 192.168.56.1
subnet_mask: 255.255.255.0
@@ -54,11 +68,29 @@ To set up a test environment for DHCP server you need:
lease_duration: 86400
ra_slaac_only: false
ra_allow_slaac: false
+ ```
-2. Start the server
+ 2. Start the server
- ./AdGuardHome
+ ```sh
+ ./AdGuardHome -v
+ ```
- There should be a message in log which shows that DHCP server is ready:
+    There should be a message in the log showing that the DHCP server is ready:
- [info] DHCP: listening on 0.0.0.0:67
+ ```
+ [info] DHCP: listening on 0.0.0.0:67
+ ```
+
+## Quick test with DHCPTest utility
+
+ ### Prerequisites
+
+ * [DHCP test utility][dhcptest-gh].
+
+ ### Quick test
+
+The DHCP server can be tested for DISCOVER-OFFER exchanges by running the
+utility in interactive mode, as shown below.
+
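+A minimal interactive session might look like the following (the exact
+commands and prompt are assumptions, see `dhcptest --help` for the actual
+options):
+
+```sh
+$ dhcptest
+dhcptest> discover
+```
+
+The utility broadcasts a DHCP DISCOVER packet and prints any OFFER replies,
+so an OFFER with an address from the configured range confirms that the
+server works.
+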
+[dhcptest-gh]: https://github.com/CyberShadow/dhcptest
diff --git a/internal/dhcpd/conn_darwin.go b/internal/dhcpd/conn_bsd.go
similarity index 99%
rename from internal/dhcpd/conn_darwin.go
rename to internal/dhcpd/conn_bsd.go
index a80ae482..3a8aff93 100644
--- a/internal/dhcpd/conn_darwin.go
+++ b/internal/dhcpd/conn_bsd.go
@@ -1,4 +1,4 @@
-//go:build darwin
+//go:build darwin || freebsd || openbsd
package dhcpd
diff --git a/internal/dhcpd/conn_darwin_internal_test.go b/internal/dhcpd/conn_bsd_internal_test.go
similarity index 99%
rename from internal/dhcpd/conn_darwin_internal_test.go
rename to internal/dhcpd/conn_bsd_internal_test.go
index e0522a0f..9f3505b7 100644
--- a/internal/dhcpd/conn_darwin_internal_test.go
+++ b/internal/dhcpd/conn_bsd_internal_test.go
@@ -1,4 +1,4 @@
-//go:build darwin
+//go:build darwin || freebsd || openbsd
package dhcpd
diff --git a/internal/dhcpd/conn_unix.go b/internal/dhcpd/conn_linux.go
similarity index 99%
rename from internal/dhcpd/conn_unix.go
rename to internal/dhcpd/conn_linux.go
index 5602d126..1ebcc339 100644
--- a/internal/dhcpd/conn_unix.go
+++ b/internal/dhcpd/conn_linux.go
@@ -1,4 +1,4 @@
-//go:build freebsd || linux || openbsd
+//go:build linux
package dhcpd
diff --git a/internal/dhcpd/conn_unix_internal_test.go b/internal/dhcpd/conn_linux_internal_test.go
similarity index 99%
rename from internal/dhcpd/conn_unix_internal_test.go
rename to internal/dhcpd/conn_linux_internal_test.go
index ca68c1b9..ab344fd2 100644
--- a/internal/dhcpd/conn_unix_internal_test.go
+++ b/internal/dhcpd/conn_linux_internal_test.go
@@ -1,4 +1,4 @@
-//go:build freebsd || linux || openbsd
+//go:build linux
package dhcpd
diff --git a/internal/dhcpd/dhcpd.go b/internal/dhcpd/dhcpd.go
index 69082c0c..5a3656d1 100644
--- a/internal/dhcpd/dhcpd.go
+++ b/internal/dhcpd/dhcpd.go
@@ -239,36 +239,16 @@ func Create(conf *ServerConfig) (s *server, err error) {
// [aghhttp.RegisterFunc].
s.registerHandlers()
- v4conf := conf.Conf4
- v4conf.InterfaceName = s.conf.InterfaceName
- v4conf.notify = s.onNotify
- v4conf.Enabled = s.conf.Enabled && v4conf.RangeStart.IsValid()
-
- s.srv4, err = v4Create(&v4conf)
+ v4Enabled, v6Enabled, err := s.setServers(conf)
if err != nil {
- if v4conf.Enabled {
- return nil, fmt.Errorf("creating dhcpv4 srv: %w", err)
- }
-
- log.Debug("dhcpd: warning: creating dhcpv4 srv: %s", err)
- }
-
- v6conf := conf.Conf6
- v6conf.Enabled = s.conf.Enabled
- if len(v6conf.RangeStart) == 0 {
- v6conf.Enabled = false
- }
- v6conf.InterfaceName = s.conf.InterfaceName
- v6conf.notify = s.onNotify
- s.srv6, err = v6Create(v6conf)
- if err != nil {
- return nil, fmt.Errorf("creating dhcpv6 srv: %w", err)
+ // Don't wrap the error, because it's informative enough as is.
+ return nil, err
}
s.conf.Conf4 = conf.Conf4
s.conf.Conf6 = conf.Conf6
- if s.conf.Enabled && !v4conf.Enabled && !v6conf.Enabled {
+ if s.conf.Enabled && !v4Enabled && !v6Enabled {
return nil, fmt.Errorf("neither dhcpv4 nor dhcpv6 srv is configured")
}
@@ -289,6 +269,39 @@ func Create(conf *ServerConfig) (s *server, err error) {
return s, nil
}
+// setServers creates and sets the DHCPv4 and DHCPv6 servers from the
+// provided configuration conf.
+func (s *server) setServers(conf *ServerConfig) (v4Enabled, v6Enabled bool, err error) {
+ v4conf := conf.Conf4
+ v4conf.InterfaceName = s.conf.InterfaceName
+ v4conf.notify = s.onNotify
+ v4conf.Enabled = s.conf.Enabled && v4conf.RangeStart.IsValid()
+
+ s.srv4, err = v4Create(&v4conf)
+ if err != nil {
+ if v4conf.Enabled {
+ return true, false, fmt.Errorf("creating dhcpv4 srv: %w", err)
+ }
+
+ log.Debug("dhcpd: warning: creating dhcpv4 srv: %s", err)
+ }
+
+ v6conf := conf.Conf6
+ v6conf.InterfaceName = s.conf.InterfaceName
+ v6conf.notify = s.onNotify
+ v6conf.Enabled = s.conf.Enabled
+ if len(v6conf.RangeStart) == 0 {
+ v6conf.Enabled = false
+ }
+
+ s.srv6, err = v6Create(v6conf)
+ if err != nil {
+ return v4conf.Enabled, v6conf.Enabled, fmt.Errorf("creating dhcpv6 srv: %w", err)
+ }
+
+ return v4conf.Enabled, v6conf.Enabled, nil
+}
+
// Enabled returns true when the server is enabled.
func (s *server) Enabled() (ok bool) {
return s.conf.Enabled
diff --git a/internal/dhcpd/http_unix.go b/internal/dhcpd/http_unix.go
index 6430afdc..b07f9543 100644
--- a/internal/dhcpd/http_unix.go
+++ b/internal/dhcpd/http_unix.go
@@ -16,6 +16,7 @@ import (
"github.com/AdguardTeam/AdGuardHome/internal/aghnet"
"github.com/AdguardTeam/golibs/errors"
"github.com/AdguardTeam/golibs/log"
+ "github.com/AdguardTeam/golibs/netutil"
)
type v4ServerConfJSON struct {
@@ -263,6 +264,28 @@ func (s *server) handleDHCPSetConfigV6(
return srv6, enabled, err
}
+// createServers returns DHCPv4 and DHCPv6 servers created from the provided
+// configuration conf.
+func (s *server) createServers(conf *dhcpServerConfigJSON) (srv4, srv6 DHCPServer, err error) {
+ srv4, v4Enabled, err := s.handleDHCPSetConfigV4(conf)
+ if err != nil {
+ return nil, nil, fmt.Errorf("bad dhcpv4 configuration: %s", err)
+ }
+
+ srv6, v6Enabled, err := s.handleDHCPSetConfigV6(conf)
+ if err != nil {
+ return nil, nil, fmt.Errorf("bad dhcpv6 configuration: %s", err)
+ }
+
+ if conf.Enabled == aghalg.NBTrue && !v4Enabled && !v6Enabled {
+ return nil, nil, fmt.Errorf("dhcpv4 or dhcpv6 configuration must be complete")
+ }
+
+ return srv4, srv6, nil
+}
+
+// handleDHCPSetConfig is the handler for the POST /control/dhcp/set_config
+// HTTP API.
func (s *server) handleDHCPSetConfig(w http.ResponseWriter, r *http.Request) {
conf := &dhcpServerConfigJSON{}
conf.Enabled = aghalg.BoolToNullBool(s.conf.Enabled)
@@ -275,22 +298,9 @@ func (s *server) handleDHCPSetConfig(w http.ResponseWriter, r *http.Request) {
return
}
- srv4, v4Enabled, err := s.handleDHCPSetConfigV4(conf)
+ srv4, srv6, err := s.createServers(conf)
if err != nil {
- aghhttp.Error(r, w, http.StatusBadRequest, "bad dhcpv4 configuration: %s", err)
-
- return
- }
-
- srv6, v6Enabled, err := s.handleDHCPSetConfigV6(conf)
- if err != nil {
- aghhttp.Error(r, w, http.StatusBadRequest, "bad dhcpv6 configuration: %s", err)
-
- return
- }
-
- if conf.Enabled == aghalg.NBTrue && !v4Enabled && !v6Enabled {
- aghhttp.Error(r, w, http.StatusBadRequest, "dhcpv4 or dhcpv6 configuration must be complete")
+ aghhttp.Error(r, w, http.StatusBadRequest, "%s", err)
return
}
@@ -350,8 +360,10 @@ type netInterfaceJSON struct {
Addrs6 []netip.Addr `json:"ipv6_addresses"`
}
+// handleDHCPInterfaces is the handler for the GET /control/dhcp/interfaces
+// HTTP API.
func (s *server) handleDHCPInterfaces(w http.ResponseWriter, r *http.Request) {
- response := map[string]netInterfaceJSON{}
+ resp := map[string]*netInterfaceJSON{}
ifaces, err := net.Interfaces()
if err != nil {
@@ -362,82 +374,86 @@ func (s *server) handleDHCPInterfaces(w http.ResponseWriter, r *http.Request) {
for _, iface := range ifaces {
if iface.Flags&net.FlagLoopback != 0 {
- // it's a loopback, skip it
- continue
- }
- if iface.Flags&net.FlagBroadcast == 0 {
- // this interface doesn't support broadcast, skip it
+ // It's a loopback, skip it.
continue
}
- var addrs []net.Addr
- addrs, err = iface.Addrs()
- if err != nil {
- aghhttp.Error(
- r,
- w,
- http.StatusInternalServerError,
- "Failed to get addresses for interface %s: %s",
- iface.Name,
- err,
- )
+ if iface.Flags&net.FlagBroadcast == 0 {
+ // This interface doesn't support broadcast, skip it.
+ continue
+ }
+
+ jsonIface, iErr := newNetInterfaceJSON(iface)
+ if iErr != nil {
+ aghhttp.Error(r, w, http.StatusInternalServerError, "%s", iErr)
return
}
- jsonIface := netInterfaceJSON{
- Name: iface.Name,
- HardwareAddr: iface.HardwareAddr.String(),
- }
-
- if iface.Flags != 0 {
- jsonIface.Flags = iface.Flags.String()
- }
- // we don't want link-local addresses in json, so skip them
- for _, addr := range addrs {
- ipnet, ok := addr.(*net.IPNet)
- if !ok {
- // not an IPNet, should not happen
- aghhttp.Error(
- r,
- w,
- http.StatusInternalServerError,
- "got iface.Addrs() element %[1]s that is not net.IPNet, it is %[1]T",
- addr)
-
- return
- }
- // ignore link-local
- //
- // TODO(e.burkov): Try to listen DHCP on LLA as well.
- if ipnet.IP.IsLinkLocalUnicast() {
- continue
- }
-
- if ip4 := ipnet.IP.To4(); ip4 != nil {
- addr := netip.AddrFrom4(*(*[4]byte)(ip4))
- jsonIface.Addrs4 = append(jsonIface.Addrs4, addr)
- } else {
- addr := netip.AddrFrom16(*(*[16]byte)(ipnet.IP))
- jsonIface.Addrs6 = append(jsonIface.Addrs6, addr)
- }
- }
- if len(jsonIface.Addrs4)+len(jsonIface.Addrs6) != 0 {
- jsonIface.GatewayIP = aghnet.GatewayIP(iface.Name)
- response[iface.Name] = jsonIface
+ if jsonIface != nil {
+ resp[iface.Name] = jsonIface
}
}
- err = json.NewEncoder(w).Encode(response)
+ _ = aghhttp.WriteJSONResponse(w, r, resp)
+}
+
+// newNetInterfaceJSON creates a JSON object from a [net.Interface] iface.
+func newNetInterfaceJSON(iface net.Interface) (out *netInterfaceJSON, err error) {
+ addrs, err := iface.Addrs()
if err != nil {
- aghhttp.Error(
- r,
- w,
- http.StatusInternalServerError,
- "Failed to marshal json with available interfaces: %s",
+ return nil, fmt.Errorf(
+ "failed to get addresses for interface %s: %s",
+ iface.Name,
err,
)
}
+
+ out = &netInterfaceJSON{
+ Name: iface.Name,
+ HardwareAddr: iface.HardwareAddr.String(),
+ }
+
+ if iface.Flags != 0 {
+ out.Flags = iface.Flags.String()
+ }
+
+ // We don't want link-local addresses in JSON, so skip them.
+ for _, addr := range addrs {
+ ipNet, ok := addr.(*net.IPNet)
+ if !ok {
+ // Not an IPNet, should not happen.
+ return nil, fmt.Errorf("got iface.Addrs() element %[1]s that is not"+
+ " net.IPNet, it is %[1]T", addr)
+ }
+
+ // Ignore link-local.
+ //
+ // TODO(e.burkov): Try to listen DHCP on LLA as well.
+ if ipNet.IP.IsLinkLocalUnicast() {
+ continue
+ }
+
+ vAddr, iErr := netutil.IPToAddrNoMapped(ipNet.IP)
+ if iErr != nil {
+			// The conversion failed, which should not happen.
+ return nil, fmt.Errorf("failed to convert IP address %[1]s: %w", addr, iErr)
+ }
+
+ if vAddr.Is4() {
+ out.Addrs4 = append(out.Addrs4, vAddr)
+ } else {
+ out.Addrs6 = append(out.Addrs6, vAddr)
+ }
+ }
+
+ if len(out.Addrs4)+len(out.Addrs6) == 0 {
+ return nil, nil
+ }
+
+ out.GatewayIP = aghnet.GatewayIP(iface.Name)
+
+ return out, nil
}
// dhcpSearchOtherResult contains information about other DHCP server for
diff --git a/internal/dhcpd/routeradv.go b/internal/dhcpd/routeradv.go
index 9c87ca9f..a826df91 100644
--- a/internal/dhcpd/routeradv.go
+++ b/internal/dhcpd/routeradv.go
@@ -7,6 +7,7 @@ import (
"sync/atomic"
"time"
+ "github.com/AdguardTeam/golibs/errors"
"github.com/AdguardTeam/golibs/log"
"github.com/AdguardTeam/golibs/netutil"
"golang.org/x/net/icmp"
@@ -195,7 +196,7 @@ func createICMPv6RAPacket(params icmpv6RA) (data []byte, err error) {
return data, nil
}
-// Init - initialize RA module
+// Init initializes the RA module.
func (ra *raCtx) Init() (err error) {
ra.stop.Store(0)
ra.conn = nil
@@ -203,8 +204,7 @@ func (ra *raCtx) Init() (err error) {
return nil
}
- log.Debug("dhcpv6 ra: source IP address: %s DNS IP address: %s",
- ra.ipAddr, ra.dnsIPAddr)
+ log.Debug("dhcpv6 ra: source IP address: %s DNS IP address: %s", ra.ipAddr, ra.dnsIPAddr)
params := icmpv6RA{
managedAddressConfiguration: !ra.raSLAACOnly,
@@ -223,18 +223,15 @@ func (ra *raCtx) Init() (err error) {
return fmt.Errorf("creating packet: %w", err)
}
- success := false
ipAndScope := ra.ipAddr.String() + "%" + ra.ifaceName
ra.conn, err = icmp.ListenPacket("ip6:ipv6-icmp", ipAndScope)
if err != nil {
return fmt.Errorf("dhcpv6 ra: icmp.ListenPacket: %w", err)
}
+
defer func() {
- if !success {
- derr := ra.Close()
- if derr != nil {
- log.Error("closing context: %s", derr)
- }
+ if err != nil {
+ err = errors.WithDeferred(err, ra.Close())
}
}()
@@ -269,7 +266,6 @@ func (ra *raCtx) Init() (err error) {
log.Debug("dhcpv6 ra: loop exit")
}()
- success = true
return nil
}
diff --git a/internal/dhcpd/v4_unix.go b/internal/dhcpd/v4_unix.go
index 20b2c96e..34f96210 100644
--- a/internal/dhcpd/v4_unix.go
+++ b/internal/dhcpd/v4_unix.go
@@ -342,8 +342,8 @@ func (s *v4Server) rmLease(lease *Lease) (err error) {
// server to be configured and it's not.
const ErrUnconfigured errors.Error = "server is unconfigured"
-// AddStaticLease implements the DHCPServer interface for *v4Server. It is safe
-// for concurrent use.
+// AddStaticLease implements the DHCPServer interface for *v4Server. It is
+// safe for concurrent use.
func (s *v4Server) AddStaticLease(l *Lease) (err error) {
defer func() { err = errors.Annotate(err, "dhcpv4: adding static lease: %w") }()
@@ -354,21 +354,23 @@ func (s *v4Server) AddStaticLease(l *Lease) (err error) {
l.IP = l.IP.Unmap()
if !l.IP.Is4() {
- return fmt.Errorf("invalid ip %q, only ipv4 is supported", l.IP)
+ return fmt.Errorf("invalid IP %q: only IPv4 is supported", l.IP)
} else if gwIP := s.conf.GatewayIP; gwIP == l.IP {
- return fmt.Errorf("can't assign the gateway IP %s to the lease", gwIP)
+ return fmt.Errorf("can't assign the gateway IP %q to the lease", gwIP)
}
l.IsStatic = true
err = netutil.ValidateMAC(l.HWAddr)
if err != nil {
+ // Don't wrap the error, because it's informative enough as is.
return err
}
if hostname := l.Hostname; hostname != "" {
hostname, err = normalizeHostname(hostname)
if err != nil {
+ // Don't wrap the error, because it's informative enough as is.
return err
}
@@ -386,32 +388,9 @@ func (s *v4Server) AddStaticLease(l *Lease) (err error) {
l.Hostname = hostname
}
- // Perform the following actions in an anonymous function to make sure
- // that the lock gets unlocked before the notification step.
- func() {
- s.leasesLock.Lock()
- defer s.leasesLock.Unlock()
-
- err = s.rmDynamicLease(l)
- if err != nil {
- err = fmt.Errorf(
- "removing dynamic leases for %s (%s): %w",
- l.IP,
- l.HWAddr,
- err,
- )
-
- return
- }
-
- err = s.addLease(l)
- if err != nil {
- err = fmt.Errorf("adding static lease for %s (%s): %w", l.IP, l.HWAddr, err)
-
- return
- }
- }()
+ err = s.updateStaticLease(l)
if err != nil {
+ // Don't wrap the error, because it's informative enough as is.
return err
}
@@ -421,6 +400,25 @@ func (s *v4Server) AddStaticLease(l *Lease) (err error) {
return nil
}
+// updateStaticLease safely removes the dynamic lease with the same properties
+// and then adds the static lease l.
+func (s *v4Server) updateStaticLease(l *Lease) (err error) {
+ s.leasesLock.Lock()
+ defer s.leasesLock.Unlock()
+
+ err = s.rmDynamicLease(l)
+ if err != nil {
+ return fmt.Errorf("removing dynamic leases for %s (%s): %w", l.IP, l.HWAddr, err)
+ }
+
+ err = s.addLease(l)
+ if err != nil {
+ return fmt.Errorf("adding static lease for %s (%s): %w", l.IP, l.HWAddr, err)
+ }
+
+ return nil
+}
+
// RemoveStaticLease removes a static lease. It is safe for concurrent use.
func (s *v4Server) RemoveStaticLease(l *Lease) (err error) {
defer func() { err = errors.Annotate(err, "dhcpv4: %w") }()
@@ -894,24 +892,9 @@ func (s *v4Server) handleDecline(req, resp *dhcpv4.DHCPv4) (err error) {
reqIP = req.ClientIPAddr
}
- netIP, ok := netip.AddrFromSlice(reqIP)
- if !ok {
- log.Info("dhcpv4: invalid IP: %s", reqIP)
-
- return nil
- }
-
- var oldLease *Lease
- for _, l := range s.leases {
- if bytes.Equal(l.HWAddr, mac) && l.IP == netIP {
- oldLease = l
-
- break
- }
- }
-
+ oldLease := s.findLeaseForIP(reqIP, mac)
if oldLease == nil {
- log.Info("dhcpv4: lease with ip %s for %s not found", reqIP, mac)
+ log.Info("dhcpv4: lease with IP %s for %s not found", reqIP, mac)
return nil
}
@@ -925,7 +908,7 @@ func (s *v4Server) handleDecline(req, resp *dhcpv4.DHCPv4) (err error) {
if err != nil {
return fmt.Errorf("allocating new lease for %s: %w", mac, err)
} else if newLease == nil {
- log.Info("dhcpv4: allocating new lease for %s: no more ip addresses", mac)
+ log.Info("dhcpv4: allocating new lease for %s: no more IP addresses", mac)
resp.YourIPAddr = make([]byte, 4)
resp.UpdateOption(dhcpv4.OptMessageType(dhcpv4.MessageTypeAck))
@@ -941,15 +924,32 @@ func (s *v4Server) handleDecline(req, resp *dhcpv4.DHCPv4) (err error) {
return fmt.Errorf("adding new lease for %s: %w", mac, err)
}
- log.Info("dhcpv4: changed ip from %s to %s for %s", reqIP, newLease.IP, mac)
-
- resp.YourIPAddr = net.IP(newLease.IP.AsSlice())
+ log.Info("dhcpv4: changed IP from %s to %s for %s", reqIP, newLease.IP, mac)
+ resp.YourIPAddr = newLease.IP.AsSlice()
resp.UpdateOption(dhcpv4.OptMessageType(dhcpv4.MessageTypeAck))
return nil
}
+// findLeaseForIP returns the lease for the provided IP and MAC addresses, or
+// nil if there is none.
+func (s *v4Server) findLeaseForIP(ip net.IP, mac net.HardwareAddr) (l *Lease) {
+ netIP, ok := netip.AddrFromSlice(ip)
+ if !ok {
+ log.Info("dhcpv4: invalid IP: %s", ip)
+
+ return nil
+ }
+
+ for _, il := range s.leases {
+ if bytes.Equal(il.HWAddr, mac) && il.IP == netIP {
+ return il
+ }
+ }
+
+ return nil
+}
+
// handleRelease is the handler for the DHCP Release request.
func (s *v4Server) handleRelease(req, resp *dhcpv4.DHCPv4) (err error) {
mac := req.ClientHWAddr
@@ -995,11 +995,80 @@ func (s *v4Server) handleRelease(req, resp *dhcpv4.DHCPv4) (err error) {
return nil
}
-// Find a lease associated with MAC and prepare response
-// Return 1: OK
-// Return 0: error; reply with Nak
-// Return -1: error; don't reply
-func (s *v4Server) handle(req, resp *dhcpv4.DHCPv4) int {
+// messageHandler describes a DHCPv4 message handler function.
+type messageHandler func(s *v4Server, req, resp *dhcpv4.DHCPv4) (rCode int, l *Lease, err error)
+
+// messageHandlers is a map of DHCPv4 message handlers keyed by message type.
+var messageHandlers = map[dhcpv4.MessageType]messageHandler{
+ dhcpv4.MessageTypeDiscover: func(
+ s *v4Server,
+ req *dhcpv4.DHCPv4,
+ resp *dhcpv4.DHCPv4,
+ ) (rCode int, l *Lease, err error) {
+ l, err = s.handleDiscover(req, resp)
+ if err != nil {
+ return 0, nil, fmt.Errorf("handling discover: %s", err)
+ }
+
+ if l == nil {
+ return 0, nil, nil
+ }
+
+ return 1, l, nil
+ },
+ dhcpv4.MessageTypeRequest: func(
+ s *v4Server,
+ req *dhcpv4.DHCPv4,
+ resp *dhcpv4.DHCPv4,
+ ) (rCode int, l *Lease, err error) {
+ var toReply bool
+ l, toReply = s.handleRequest(req, resp)
+ if l == nil {
+ if toReply {
+ return 0, nil, nil
+ }
+
+ // Drop the packet.
+ return -1, nil, nil
+ }
+
+ return 1, l, nil
+ },
+ dhcpv4.MessageTypeDecline: func(
+ s *v4Server,
+ req *dhcpv4.DHCPv4,
+ resp *dhcpv4.DHCPv4,
+ ) (rCode int, l *Lease, err error) {
+ err = s.handleDecline(req, resp)
+ if err != nil {
+ return 0, nil, fmt.Errorf("handling decline: %s", err)
+ }
+
+ return 1, nil, nil
+ },
+ dhcpv4.MessageTypeRelease: func(
+ s *v4Server,
+ req *dhcpv4.DHCPv4,
+ resp *dhcpv4.DHCPv4,
+ ) (rCode int, l *Lease, err error) {
+ err = s.handleRelease(req, resp)
+ if err != nil {
+ return 0, nil, fmt.Errorf("handling release: %s", err)
+ }
+
+ return 1, nil, nil
+ },
+}
+
+// handle processes the request: it finds a lease associated with the MAC
+// address and prepares a response.
+//
+// Possible return values are:
+// - "1": OK,
+// - "0": error, reply with Nak,
+// - "-1": error, don't reply.
+func (s *v4Server) handle(req, resp *dhcpv4.DHCPv4) (rCode int) {
var err error
// Include server's identifier option since any reply should contain it.
@@ -1007,47 +1076,26 @@ func (s *v4Server) handle(req, resp *dhcpv4.DHCPv4) int {
// See https://datatracker.ietf.org/doc/html/rfc2131#page-29.
resp.UpdateOption(dhcpv4.OptServerIdentifier(s.conf.dnsIPAddrs[0].AsSlice()))
- // TODO(a.garipov): Refactor this into handlers.
- var l *Lease
- switch mt := req.MessageType(); mt {
- case dhcpv4.MessageTypeDiscover:
- l, err = s.handleDiscover(req, resp)
- if err != nil {
- log.Error("dhcpv4: handling discover: %s", err)
+ handler := messageHandlers[req.MessageType()]
+ if handler == nil {
+ s.updateOptions(req, resp)
- return 0
- }
+ return 1
+ }
- if l == nil {
- return 0
- }
- case dhcpv4.MessageTypeRequest:
- var toReply bool
- l, toReply = s.handleRequest(req, resp)
- if l == nil {
- if toReply {
- return 0
- }
- return -1 // drop packet
- }
- case dhcpv4.MessageTypeDecline:
- err = s.handleDecline(req, resp)
- if err != nil {
- log.Error("dhcpv4: handling decline: %s", err)
+ rCode, l, err := handler(s, req, resp)
+ if err != nil {
+ log.Error("dhcpv4: %s", err)
- return 0
- }
- case dhcpv4.MessageTypeRelease:
- err = s.handleRelease(req, resp)
- if err != nil {
- log.Error("dhcpv4: handling release: %s", err)
+ return 0
+ }
- return 0
- }
+ if rCode != 1 {
+ return rCode
}
if l != nil {
- resp.YourIPAddr = net.IP(l.IP.AsSlice())
+ resp.YourIPAddr = l.IP.AsSlice()
}
s.updateOptions(req, resp)
@@ -1162,23 +1210,8 @@ func (s *v4Server) Start() (err error) {
// No available IP addresses which may appear later.
return nil
}
- // Update the value of Domain Name Server option separately from others if
- // not assigned yet since its value is available only at server's start.
- //
- // TODO(e.burkov): Initialize as implicit option with the rest of default
- // options when it will be possible to do before the call to Start.
- if !s.explicitOpts.Has(dhcpv4.OptionDomainNameServer) {
- s.implicitOpts.Update(dhcpv4.OptDNS(dnsIPAddrs...))
- }
- for _, ip := range dnsIPAddrs {
- ip = ip.To4()
- if ip == nil {
- continue
- }
-
- s.conf.dnsIPAddrs = append(s.conf.dnsIPAddrs, netip.AddrFrom4(*(*[4]byte)(ip)))
- }
+ s.configureDNSIPAddrs(dnsIPAddrs)
var c net.PacketConn
if c, err = s.newDHCPConn(iface); err != nil {
@@ -1199,10 +1232,10 @@ func (s *v4Server) Start() (err error) {
log.Info("dhcpv4: listening")
go func() {
- if serr := s.srv.Serve(); errors.Is(serr, net.ErrClosed) {
+ if sErr := s.srv.Serve(); errors.Is(sErr, net.ErrClosed) {
log.Info("dhcpv4: server is closed")
- } else if serr != nil {
- log.Error("dhcpv4: srv.Serve: %s", serr)
+ } else if sErr != nil {
+ log.Error("dhcpv4: srv.Serve: %s", sErr)
}
}()
@@ -1213,6 +1246,28 @@ func (s *v4Server) Start() (err error) {
return nil
}
+// configureDNSIPAddrs updates the v4Server configuration with the provided
+// slice of DNS IP addresses.
+func (s *v4Server) configureDNSIPAddrs(dnsIPAddrs []net.IP) {
+ // Update the value of Domain Name Server option separately from others if
+ // not assigned yet since its value is available only at server's start.
+ //
+ // TODO(e.burkov): Initialize as implicit option with the rest of default
+ // options when it will be possible to do before the call to Start.
+ if !s.explicitOpts.Has(dhcpv4.OptionDomainNameServer) {
+ s.implicitOpts.Update(dhcpv4.OptDNS(dnsIPAddrs...))
+ }
+
+ for _, ip := range dnsIPAddrs {
+ vAddr, err := netutil.IPToAddr(ip, netutil.AddrFamilyIPv4)
+ if err != nil {
+ continue
+ }
+
+ s.conf.dnsIPAddrs = append(s.conf.dnsIPAddrs, vAddr)
+ }
+}
+
// Stop - stop server
func (s *v4Server) Stop() (err error) {
if s.srv == nil {
diff --git a/internal/dhcpd/v4_unix_test.go b/internal/dhcpd/v4_unix_test.go
index a5ce5e0e..162b5b88 100644
--- a/internal/dhcpd/v4_unix_test.go
+++ b/internal/dhcpd/v4_unix_test.go
@@ -227,7 +227,7 @@ func TestV4Server_AddRemove_static(t *testing.T) {
},
name: "with_gateway_ip",
wantErrMsg: "dhcpv4: adding static lease: " +
- "can't assign the gateway IP 192.168.10.1 to the lease",
+ `can't assign the gateway IP "192.168.10.1" to the lease`,
}, {
lease: &Lease{
Hostname: "ip6.local",
@@ -236,7 +236,7 @@ func TestV4Server_AddRemove_static(t *testing.T) {
},
name: "ipv6",
wantErrMsg: `dhcpv4: adding static lease: ` +
- `invalid ip "ffff::1", only ipv4 is supported`,
+ `invalid IP "ffff::1": only IPv4 is supported`,
}, {
lease: &Lease{
Hostname: "bad-mac.local",
diff --git a/internal/dhcpd/v6_unix.go b/internal/dhcpd/v6_unix.go
index cbe67eaa..fa3640f9 100644
--- a/internal/dhcpd/v6_unix.go
+++ b/internal/dhcpd/v6_unix.go
@@ -30,7 +30,7 @@ type v6Server struct {
leasesLock sync.Mutex
leases []*Lease
ipAddrs [256]byte
- sid dhcpv6.Duid
+ sid dhcpv6.DUID
ra raCtx // RA module
@@ -586,9 +586,31 @@ func (s *v6Server) packetHandler(conn net.PacketConn, peer net.Addr, req dhcpv6.
}
}
-// initialize RA module
-func (s *v6Server) initRA(iface *net.Interface) error {
- // choose the source IP address - should be link-local-unicast
+// configureDNSIPAddrs updates the v6Server configuration with the DNS IP
+// addresses of the provided interface iface and initializes the RA module.
+func (s *v6Server) configureDNSIPAddrs(iface *net.Interface) (ok bool, err error) {
+ dnsIPAddrs, err := aghnet.IfaceDNSIPAddrs(
+ iface,
+ aghnet.IPVersion6,
+ defaultMaxAttempts,
+ defaultBackoff,
+ )
+ if err != nil {
+ return false, fmt.Errorf("interface %s: %w", iface.Name, err)
+ }
+
+ if len(dnsIPAddrs) == 0 {
+ return false, nil
+ }
+
+ s.conf.dnsIPAddrs = dnsIPAddrs
+
+ return true, s.initRA(iface)
+}
+
+// initRA initializes the RA module.
+func (s *v6Server) initRA(iface *net.Interface) (err error) {
+ // Choose the source IP address - should be link-local-unicast.
s.ra.ipAddr = s.conf.dnsIPAddrs[0]
for _, ip := range s.conf.dnsIPAddrs {
if ip.IsLinkLocalUnicast() {
@@ -604,6 +626,7 @@ func (s *v6Server) initRA(iface *net.Interface) error {
s.ra.ifaceName = s.conf.InterfaceName
s.ra.iface = iface
s.ra.packetSendPeriod = 1 * time.Second
+
return s.ra.Init()
}
@@ -623,63 +646,47 @@ func (s *v6Server) Start() (err error) {
log.Debug("dhcpv6: starting...")
- dnsIPAddrs, err := aghnet.IfaceDNSIPAddrs(
- iface,
- aghnet.IPVersion6,
- defaultMaxAttempts,
- defaultBackoff,
- )
+ ok, err := s.configureDNSIPAddrs(iface)
if err != nil {
- return fmt.Errorf("interface %s: %w", ifaceName, err)
+ // Don't wrap the error, because it's informative enough as is.
+ return err
}
- if len(dnsIPAddrs) == 0 {
+ if !ok {
// No available IP addresses which may appear later.
return nil
}
- s.conf.dnsIPAddrs = dnsIPAddrs
-
- err = s.initRA(iface)
- if err != nil {
- return err
- }
-
- // don't initialize DHCPv6 server if we must force the clients to use SLAAC
+ // Don't initialize DHCPv6 server if we must force the clients to use SLAAC.
if s.conf.RASLAACOnly {
log.Debug("not starting dhcpv6 server due to ra_slaac_only=true")
return nil
}
- log.Debug("dhcpv6: listening...")
-
err = netutil.ValidateMAC(iface.HardwareAddr)
if err != nil {
return fmt.Errorf("validating interface %s: %w", iface.Name, err)
}
- s.sid = dhcpv6.Duid{
- Type: dhcpv6.DUID_LLT,
- HwType: iana.HWTypeEthernet,
+ s.sid = &dhcpv6.DUIDLLT{
+ HWType: iana.HWTypeEthernet,
LinkLayerAddr: iface.HardwareAddr,
Time: dhcpv6.GetTime(),
}
- laddr := &net.UDPAddr{
- IP: net.ParseIP("::"),
- Port: dhcpv6.DefaultServerPort,
- }
- s.srv, err = server6.NewServer(iface.Name, laddr, s.packetHandler, server6.WithDebugLogger())
+ s.srv, err = server6.NewServer(iface.Name, nil, s.packetHandler, server6.WithDebugLogger())
if err != nil {
return err
}
+ log.Debug("dhcpv6: listening...")
+
go func() {
- if serr := s.srv.Serve(); errors.Is(serr, net.ErrClosed) {
+ if sErr := s.srv.Serve(); errors.Is(sErr, net.ErrClosed) {
log.Info("dhcpv6: server is closed")
- } else if serr != nil {
- log.Error("dhcpv6: srv.Serve: %s", serr)
+ } else if sErr != nil {
+ log.Error("dhcpv6: srv.Serve: %s", sErr)
}
}()
diff --git a/internal/dhcpd/v6_unix_test.go b/internal/dhcpd/v6_unix_test.go
index c5034e47..3ed5221a 100644
--- a/internal/dhcpd/v6_unix_test.go
+++ b/internal/dhcpd/v6_unix_test.go
@@ -121,9 +121,8 @@ func TestV6GetLease(t *testing.T) {
dnsAddr := net.ParseIP("2000::1")
s.conf.dnsIPAddrs = []net.IP{dnsAddr}
- s.sid = dhcpv6.Duid{
- Type: dhcpv6.DUID_LLT,
- HwType: iana.HWTypeEthernet,
+ s.sid = &dhcpv6.DUIDLL{
+ HWType: iana.HWTypeEthernet,
LinkLayerAddr: net.HardwareAddr{0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA},
}
@@ -216,9 +215,8 @@ func TestV6GetDynamicLease(t *testing.T) {
dnsAddr := net.ParseIP("2000::1")
s.conf.dnsIPAddrs = []net.IP{dnsAddr}
- s.sid = dhcpv6.Duid{
- Type: dhcpv6.DUID_LLT,
- HwType: iana.HWTypeEthernet,
+ s.sid = &dhcpv6.DUIDLL{
+ HWType: iana.HWTypeEthernet,
LinkLayerAddr: net.HardwareAddr{0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA},
}
diff --git a/internal/dnsforward/dnsforward_test.go b/internal/dnsforward/dnsforward_test.go
index ce8b1cf2..f7ff57a3 100644
--- a/internal/dnsforward/dnsforward_test.go
+++ b/internal/dnsforward/dnsforward_test.go
@@ -23,6 +23,7 @@ import (
"github.com/AdguardTeam/AdGuardHome/internal/aghtest"
"github.com/AdguardTeam/AdGuardHome/internal/dhcpd"
"github.com/AdguardTeam/AdGuardHome/internal/filtering"
+ "github.com/AdguardTeam/AdGuardHome/internal/filtering/hashprefix"
"github.com/AdguardTeam/AdGuardHome/internal/filtering/safesearch"
"github.com/AdguardTeam/dnsproxy/proxy"
"github.com/AdguardTeam/dnsproxy/upstream"
@@ -915,13 +916,23 @@ func TestBlockedByHosts(t *testing.T) {
}
func TestBlockedBySafeBrowsing(t *testing.T) {
- const hostname = "wmconvirus.narod.ru"
+ const (
+ hostname = "wmconvirus.narod.ru"
+ cacheTime = 10 * time.Minute
+ cacheSize = 10000
+ )
+
+ sbChecker := hashprefix.New(&hashprefix.Config{
+ CacheTime: cacheTime,
+ CacheSize: cacheSize,
+ Upstream: aghtest.NewBlockUpstream(hostname, true),
+ })
- sbUps := aghtest.NewBlockUpstream(hostname, true)
ans4, _ := (&aghtest.TestResolver{}).HostToIPs(hostname)
filterConf := &filtering.Config{
SafeBrowsingEnabled: true,
+ SafeBrowsingChecker: sbChecker,
}
forwardConf := ServerConfig{
UDPListenAddrs: []*net.UDPAddr{{}},
@@ -935,7 +946,6 @@ func TestBlockedBySafeBrowsing(t *testing.T) {
},
}
s := createTestServer(t, filterConf, forwardConf, nil)
- s.dnsFilter.SetSafeBrowsingUpstream(sbUps)
startDeferStop(t, s)
addr := s.dnsProxy.Addr(proxy.ProtoUDP)
diff --git a/internal/dnsforward/http_test.go b/internal/dnsforward/http_test.go
index 622f7cb8..c9846ae4 100644
--- a/internal/dnsforward/http_test.go
+++ b/internal/dnsforward/http_test.go
@@ -205,8 +205,8 @@ func TestDNSForwardHTTP_handleSetConfig(t *testing.T) {
wantSet: `validating upstream servers: validating upstream "!!!": not an ip:port`,
}, {
name: "bootstraps_bad",
- wantSet: `checking bootstrap a: invalid address: ` +
- `Resolver a is not eligible to be a bootstrap DNS server`,
+ wantSet: `checking bootstrap a: invalid address: bootstrap a:53: ` +
+ `ParseAddr("a"): unable to parse IP`,
}, {
name: "cache_bad_ttl",
wantSet: `cache_ttl_min must be less or equal than cache_ttl_max`,
@@ -487,7 +487,8 @@ func TestServer_handleTestUpstreaDNS(t *testing.T) {
},
wantResp: map[string]any{
badUps: `upstream "` + badUps + `" fails to exchange: ` +
- `couldn't communicate with upstream: dns: id mismatch`,
+ `couldn't communicate with upstream: exchanging with ` +
+ badUps + ` over tcp: dns: id mismatch`,
},
name: "broken",
}, {
@@ -497,7 +498,8 @@ func TestServer_handleTestUpstreaDNS(t *testing.T) {
wantResp: map[string]any{
goodUps: "OK",
badUps: `upstream "` + badUps + `" fails to exchange: ` +
- `couldn't communicate with upstream: dns: id mismatch`,
+ `couldn't communicate with upstream: exchanging with ` +
+ badUps + ` over tcp: dns: id mismatch`,
},
name: "both",
}}
diff --git a/internal/filtering/filtering.go b/internal/filtering/filtering.go
index 4da26616..5c30c645 100644
--- a/internal/filtering/filtering.go
+++ b/internal/filtering/filtering.go
@@ -18,8 +18,6 @@ import (
"github.com/AdguardTeam/AdGuardHome/internal/aghhttp"
"github.com/AdguardTeam/AdGuardHome/internal/aghnet"
- "github.com/AdguardTeam/dnsproxy/upstream"
- "github.com/AdguardTeam/golibs/cache"
"github.com/AdguardTeam/golibs/errors"
"github.com/AdguardTeam/golibs/log"
"github.com/AdguardTeam/golibs/mathutil"
@@ -75,6 +73,12 @@ type Resolver interface {
// Config allows you to configure DNS filtering with New() or just change variables directly.
type Config struct {
+ // SafeBrowsingChecker is the safe browsing hash-prefix checker.
+ SafeBrowsingChecker Checker `yaml:"-"`
+
+	// ParentalControlChecker is the parental control hash-prefix checker.
+ ParentalControlChecker Checker `yaml:"-"`
+
// enabled is used to be returned within Settings.
//
// It is of type uint32 to be accessed by atomic.
@@ -158,8 +162,22 @@ type hostChecker struct {
name string
}
+// Checker is used for safe browsing or parental control hash-prefix filtering.
+type Checker interface {
+	// Check returns true if the request for the host should be blocked.
+ Check(host string) (block bool, err error)
+}
+
// DNSFilter matches hostnames and DNS requests against filtering rules.
type DNSFilter struct {
+ safeSearch SafeSearch
+
+ // safeBrowsingChecker is the safe browsing hash-prefix checker.
+ safeBrowsingChecker Checker
+
+	// parentalControlChecker is the parental control hash-prefix checker.
+ parentalControlChecker Checker
+
rulesStorage *filterlist.RuleStorage
filteringEngine *urlfilter.DNSEngine
@@ -168,14 +186,6 @@ type DNSFilter struct {
engineLock sync.RWMutex
- parentalServer string // access via methods
- safeBrowsingServer string // access via methods
- parentalUpstream upstream.Upstream
- safeBrowsingUpstream upstream.Upstream
-
- safebrowsingCache cache.Cache
- parentalCache cache.Cache
-
Config // for direct access by library users, even a = assignment
// confLock protects Config.
confLock sync.RWMutex
@@ -192,7 +202,6 @@ type DNSFilter struct {
// TODO(e.burkov): Don't use regexp for such a simple text processing task.
filterTitleRegexp *regexp.Regexp
- safeSearch SafeSearch
hostCheckers []hostChecker
}
@@ -940,19 +949,12 @@ func InitModule() {
// be non-nil.
func New(c *Config, blockFilters []Filter) (d *DNSFilter, err error) {
d = &DNSFilter{
- refreshLock: &sync.Mutex{},
- filterTitleRegexp: regexp.MustCompile(`^! Title: +(.*)$`),
+ refreshLock: &sync.Mutex{},
+ filterTitleRegexp: regexp.MustCompile(`^! Title: +(.*)$`),
+ safeBrowsingChecker: c.SafeBrowsingChecker,
+ parentalControlChecker: c.ParentalControlChecker,
}
- d.safebrowsingCache = cache.New(cache.Config{
- EnableLRU: true,
- MaxSize: c.SafeBrowsingCacheSize,
- })
- d.parentalCache = cache.New(cache.Config{
- EnableLRU: true,
- MaxSize: c.ParentalCacheSize,
- })
-
d.safeSearch = c.SafeSearch
d.hostCheckers = []hostChecker{{
@@ -977,11 +979,6 @@ func New(c *Config, blockFilters []Filter) (d *DNSFilter, err error) {
defer func() { err = errors.Annotate(err, "filtering: %w") }()
- err = d.initSecurityServices()
- if err != nil {
- return nil, fmt.Errorf("initializing services: %s", err)
- }
-
d.Config = *c
d.filtersMu = &sync.RWMutex{}
@@ -1038,3 +1035,69 @@ func (d *DNSFilter) Start() {
// So for now we just start this periodic task from here.
go d.periodicallyRefreshFilters()
}
+
+// Safe browsing and parental control methods.
+
+// TODO(a.garipov): Unify with checkParental.
+func (d *DNSFilter) checkSafeBrowsing(
+ host string,
+ _ uint16,
+ setts *Settings,
+) (res Result, err error) {
+ if !setts.ProtectionEnabled || !setts.SafeBrowsingEnabled {
+ return Result{}, nil
+ }
+
+ if log.GetLevel() >= log.DEBUG {
+ timer := log.StartTimer()
+ defer timer.LogElapsed("safebrowsing lookup for %q", host)
+ }
+
+ res = Result{
+ Rules: []*ResultRule{{
+ Text: "adguard-malware-shavar",
+ FilterListID: SafeBrowsingListID,
+ }},
+ Reason: FilteredSafeBrowsing,
+ IsFiltered: true,
+ }
+
+ block, err := d.safeBrowsingChecker.Check(host)
+ if !block || err != nil {
+ return Result{}, err
+ }
+
+ return res, nil
+}
+
+// TODO(a.garipov): Unify with checkSafeBrowsing.
+func (d *DNSFilter) checkParental(
+ host string,
+ _ uint16,
+ setts *Settings,
+) (res Result, err error) {
+ if !setts.ProtectionEnabled || !setts.ParentalEnabled {
+ return Result{}, nil
+ }
+
+ if log.GetLevel() >= log.DEBUG {
+ timer := log.StartTimer()
+ defer timer.LogElapsed("parental lookup for %q", host)
+ }
+
+ res = Result{
+ Rules: []*ResultRule{{
+ Text: "parental CATEGORY_BLACKLISTED",
+ FilterListID: ParentalListID,
+ }},
+ Reason: FilteredParental,
+ IsFiltered: true,
+ }
+
+ block, err := d.parentalControlChecker.Check(host)
+ if !block || err != nil {
+ return Result{}, err
+ }
+
+ return res, nil
+}
diff --git a/internal/filtering/filtering_test.go b/internal/filtering/filtering_test.go
index 17cbebfb..8636606b 100644
--- a/internal/filtering/filtering_test.go
+++ b/internal/filtering/filtering_test.go
@@ -7,7 +7,7 @@ import (
"testing"
"github.com/AdguardTeam/AdGuardHome/internal/aghtest"
- "github.com/AdguardTeam/golibs/cache"
+ "github.com/AdguardTeam/AdGuardHome/internal/filtering/hashprefix"
"github.com/AdguardTeam/golibs/log"
"github.com/AdguardTeam/golibs/testutil"
"github.com/AdguardTeam/urlfilter/rules"
@@ -27,17 +27,6 @@ const (
// Helpers.
-func purgeCaches(d *DNSFilter) {
- for _, c := range []cache.Cache{
- d.safebrowsingCache,
- d.parentalCache,
- } {
- if c != nil {
- c.Clear()
- }
- }
-}
-
func newForTest(t testing.TB, c *Config, filters []Filter) (f *DNSFilter, setts *Settings) {
setts = &Settings{
ProtectionEnabled: true,
@@ -58,11 +47,17 @@ func newForTest(t testing.TB, c *Config, filters []Filter) (f *DNSFilter, setts
f, err := New(c, filters)
require.NoError(t, err)
- purgeCaches(f)
-
return f, setts
}
+func newChecker(host string) Checker {
+ return hashprefix.New(&hashprefix.Config{
+ CacheTime: 10,
+ CacheSize: 100000,
+ Upstream: aghtest.NewBlockUpstream(host, true),
+ })
+}
+
func (d *DNSFilter) checkMatch(t *testing.T, hostname string, setts *Settings) {
t.Helper()
@@ -175,10 +170,14 @@ func TestSafeBrowsing(t *testing.T) {
aghtest.ReplaceLogWriter(t, logOutput)
aghtest.ReplaceLogLevel(t, log.DEBUG)
- d, setts := newForTest(t, &Config{SafeBrowsingEnabled: true}, nil)
+ sbChecker := newChecker(sbBlocked)
+
+ d, setts := newForTest(t, &Config{
+ SafeBrowsingEnabled: true,
+ SafeBrowsingChecker: sbChecker,
+ }, nil)
t.Cleanup(d.Close)
- d.SetSafeBrowsingUpstream(aghtest.NewBlockUpstream(sbBlocked, true))
d.checkMatch(t, sbBlocked, setts)
require.Contains(t, logOutput.String(), fmt.Sprintf("safebrowsing lookup for %q", sbBlocked))
@@ -188,18 +187,17 @@ func TestSafeBrowsing(t *testing.T) {
d.checkMatchEmpty(t, pcBlocked, setts)
// Cached result.
- d.safeBrowsingServer = "127.0.0.1"
d.checkMatch(t, sbBlocked, setts)
d.checkMatchEmpty(t, pcBlocked, setts)
- d.safeBrowsingServer = defaultSafebrowsingServer
}
func TestParallelSB(t *testing.T) {
- d, setts := newForTest(t, &Config{SafeBrowsingEnabled: true}, nil)
+ d, setts := newForTest(t, &Config{
+ SafeBrowsingEnabled: true,
+ SafeBrowsingChecker: newChecker(sbBlocked),
+ }, nil)
t.Cleanup(d.Close)
- d.SetSafeBrowsingUpstream(aghtest.NewBlockUpstream(sbBlocked, true))
-
t.Run("group", func(t *testing.T) {
for i := 0; i < 100; i++ {
t.Run(fmt.Sprintf("aaa%d", i), func(t *testing.T) {
@@ -220,10 +218,12 @@ func TestParentalControl(t *testing.T) {
aghtest.ReplaceLogWriter(t, logOutput)
aghtest.ReplaceLogLevel(t, log.DEBUG)
- d, setts := newForTest(t, &Config{ParentalEnabled: true}, nil)
+ d, setts := newForTest(t, &Config{
+ ParentalEnabled: true,
+ ParentalControlChecker: newChecker(pcBlocked),
+ }, nil)
t.Cleanup(d.Close)
- d.SetParentalUpstream(aghtest.NewBlockUpstream(pcBlocked, true))
d.checkMatch(t, pcBlocked, setts)
require.Contains(t, logOutput.String(), fmt.Sprintf("parental lookup for %q", pcBlocked))
@@ -233,7 +233,6 @@ func TestParentalControl(t *testing.T) {
d.checkMatchEmpty(t, "api.jquery.com", setts)
// Test cached result.
- d.parentalServer = "127.0.0.1"
d.checkMatch(t, pcBlocked, setts)
d.checkMatchEmpty(t, "yandex.ru", setts)
}
@@ -593,8 +592,10 @@ func applyClientSettings(setts *Settings) {
func TestClientSettings(t *testing.T) {
d, setts := newForTest(t,
&Config{
- ParentalEnabled: true,
- SafeBrowsingEnabled: false,
+ ParentalEnabled: true,
+ SafeBrowsingEnabled: false,
+ SafeBrowsingChecker: newChecker(sbBlocked),
+ ParentalControlChecker: newChecker(pcBlocked),
},
[]Filter{{
ID: 0, Data: []byte("||example.org^\n"),
@@ -602,9 +603,6 @@ func TestClientSettings(t *testing.T) {
)
t.Cleanup(d.Close)
- d.SetParentalUpstream(aghtest.NewBlockUpstream(pcBlocked, true))
- d.SetSafeBrowsingUpstream(aghtest.NewBlockUpstream(sbBlocked, true))
-
type testCase struct {
name string
host string
@@ -665,11 +663,12 @@ func TestClientSettings(t *testing.T) {
// Benchmarks.
func BenchmarkSafeBrowsing(b *testing.B) {
- d, setts := newForTest(b, &Config{SafeBrowsingEnabled: true}, nil)
+ d, setts := newForTest(b, &Config{
+ SafeBrowsingEnabled: true,
+ SafeBrowsingChecker: newChecker(sbBlocked),
+ }, nil)
b.Cleanup(d.Close)
- d.SetSafeBrowsingUpstream(aghtest.NewBlockUpstream(sbBlocked, true))
-
for n := 0; n < b.N; n++ {
res, err := d.CheckHost(sbBlocked, dns.TypeA, setts)
require.NoError(b, err)
@@ -679,11 +678,12 @@ func BenchmarkSafeBrowsing(b *testing.B) {
}
func BenchmarkSafeBrowsingParallel(b *testing.B) {
- d, setts := newForTest(b, &Config{SafeBrowsingEnabled: true}, nil)
+ d, setts := newForTest(b, &Config{
+ SafeBrowsingEnabled: true,
+ SafeBrowsingChecker: newChecker(sbBlocked),
+ }, nil)
b.Cleanup(d.Close)
- d.SetSafeBrowsingUpstream(aghtest.NewBlockUpstream(sbBlocked, true))
-
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
res, err := d.CheckHost(sbBlocked, dns.TypeA, setts)
diff --git a/internal/filtering/hashprefix/cache.go b/internal/filtering/hashprefix/cache.go
new file mode 100644
index 00000000..a4eedec9
--- /dev/null
+++ b/internal/filtering/hashprefix/cache.go
@@ -0,0 +1,130 @@
+package hashprefix
+
+import (
+ "encoding/binary"
+ "time"
+
+ "github.com/AdguardTeam/golibs/log"
+)
+
+// expirySize is the size of the expiry field in cacheItem, in bytes.
+const expirySize = 8
+
+// cacheItem represents an item that we will store in the cache.
+type cacheItem struct {
+ // expiry is the time when cacheItem will expire.
+ expiry time.Time
+
+	// hashes are the hashed hostnames.
+ hashes []hostnameHash
+}
+
+// toCacheItem decodes a cacheItem from data. data must be at least expirySize
+// bytes long.
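+// The layout is an 8-byte big-endian Unix timestamp followed by a sequence of
+// hashSize-byte hostname hashes.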
+func toCacheItem(data []byte) *cacheItem {
+ t := time.Unix(int64(binary.BigEndian.Uint64(data)), 0)
+
+ data = data[expirySize:]
+	hashes := make([]hostnameHash, 0, len(data)/hashSize)
+
+ for i := 0; i < len(data); i += hashSize {
+ var hash hostnameHash
+ copy(hash[:], data[i:i+hashSize])
+ hashes = append(hashes, hash)
+ }
+
+ return &cacheItem{
+ expiry: t,
+ hashes: hashes,
+ }
+}
+
+// fromCacheItem encodes cacheItem into data.
+func fromCacheItem(item *cacheItem) (data []byte) {
+	data = make([]byte, expirySize, len(item.hashes)*hashSize+expirySize)
+ expiry := item.expiry.Unix()
+ binary.BigEndian.PutUint64(data[:expirySize], uint64(expiry))
+
+ for _, v := range item.hashes {
+		// nolint:looppointer // The subslice is used for a copy.
+ data = append(data, v[:]...)
+ }
+
+ return data
+}
+
+// findInCache finds the hashes in the cache. If nothing is found, it returns
+// the list of hashes whose prefixes should be requested from the upstream.
+func (c *Checker) findInCache(
+ hashes []hostnameHash,
+) (found, blocked bool, hashesToRequest []hostnameHash) {
+ now := time.Now()
+
+ i := 0
+ for _, hash := range hashes {
+		// nolint:looppointer // The subslice is used for a safe cache lookup.
+ data := c.cache.Get(hash[:prefixLen])
+ if data == nil {
+ hashes[i] = hash
+ i++
+
+ continue
+ }
+
+ item := toCacheItem(data)
+ if now.After(item.expiry) {
+ hashes[i] = hash
+ i++
+
+ continue
+ }
+
+ if ok := findMatch(hashes, item.hashes); ok {
+ return true, true, nil
+ }
+ }
+
+ if i == 0 {
+ return true, false, nil
+ }
+
+ return false, false, hashes[:i]
+}
+
+// storeInCache caches hashes.
+func (c *Checker) storeInCache(hashesToRequest, respHashes []hostnameHash) {
+ hashToStore := make(map[prefix][]hostnameHash)
+
+ for _, hash := range respHashes {
+ var pref prefix
+		// nolint:looppointer // The subslice is used for a copy.
+ copy(pref[:], hash[:])
+
+ hashToStore[pref] = append(hashToStore[pref], hash)
+ }
+
+ for pref, hash := range hashToStore {
+		// nolint:looppointer // The subslice is used for a safe cache lookup.
+ c.setCache(pref[:], hash)
+ }
+
+ for _, hash := range hashesToRequest {
+		// nolint:looppointer // The subslice is used for a safe cache lookup.
+ pref := hash[:prefixLen]
+ val := c.cache.Get(pref)
+ if val == nil {
+ c.setCache(pref, nil)
+ }
+ }
+}
+
+// setCache stores the hashes under the given prefix in the cache.
+func (c *Checker) setCache(pref []byte, hashes []hostnameHash) {
+ item := &cacheItem{
+ expiry: time.Now().Add(c.cacheTime),
+ hashes: hashes,
+ }
+
+ c.cache.Set(pref, fromCacheItem(item))
+ log.Debug("%s: stored in cache: %v", c.svc, pref)
+}
diff --git a/internal/filtering/hashprefix/hashprefix.go b/internal/filtering/hashprefix/hashprefix.go
new file mode 100644
index 00000000..ed0e3ae2
--- /dev/null
+++ b/internal/filtering/hashprefix/hashprefix.go
@@ -0,0 +1,245 @@
+// Package hashprefix is used for safe browsing and parental control.
+package hashprefix
+
+import (
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/AdguardTeam/dnsproxy/upstream"
+ "github.com/AdguardTeam/golibs/cache"
+ "github.com/AdguardTeam/golibs/log"
+ "github.com/AdguardTeam/golibs/netutil"
+ "github.com/AdguardTeam/golibs/stringutil"
+ "github.com/miekg/dns"
+ "golang.org/x/exp/slices"
+ "golang.org/x/net/publicsuffix"
+)
+
+const (
+ // prefixLen is the length of the hash prefix of the filtered hostname.
+ prefixLen = 2
+
+	// hashSize is the size of a hashed hostname.
+	hashSize = sha256.Size
+
+	// hexSize is the size of the hex representation of a hashed hostname.
+	hexSize = hashSize * 2
+)
+
+// prefix is the type of the SHA256 hash prefix used to match against the
+// domain-name database.
+type prefix [prefixLen]byte
+
+// hostnameHash is the hashed hostname.
+//
+// TODO(s.chzhen): Split into prefix and suffix.
+type hostnameHash [hashSize]byte
+
+// findMatch returns true if any hostname hash in a matches one in b.
+func findMatch(a, b []hostnameHash) (matched bool) {
+ for _, hash := range a {
+ if slices.Contains(b, hash) {
+ return true
+ }
+ }
+
+ return false
+}
+
+// Config is the configuration structure for safe browsing and parental
+// control.
+type Config struct {
+ // Upstream is the upstream DNS server.
+ Upstream upstream.Upstream
+
+ // ServiceName is the name of the service.
+ ServiceName string
+
+	// TXTSuffix is the TXT suffix for the DNS request.
+	TXTSuffix string
+
+	// CacheTime is the time period for which hashes are stored.
+	CacheTime time.Duration
+
+ // CacheSize is the maximum size of the cache. If it's zero, cache size is
+ // unlimited.
+ CacheSize uint
+}
+
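+// Checker is a hash-prefix checker that queries an upstream TXT service and
+// caches the results.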
+type Checker struct {
+ // upstream is the upstream DNS server.
+ upstream upstream.Upstream
+
+ // cache stores hostname hashes.
+ cache cache.Cache
+
+ // svc is the name of the service.
+ svc string
+
+ // txtSuffix is the TXT suffix for DNS request.
+ txtSuffix string
+
+	// cacheTime is the time period for which hashes are stored.
+ cacheTime time.Duration
+}
+
+// New returns a new hash-prefix Checker.
+func New(conf *Config) (c *Checker) {
+ return &Checker{
+ upstream: conf.Upstream,
+ cache: cache.New(cache.Config{
+ EnableLRU: true,
+ MaxSize: conf.CacheSize,
+ }),
+ svc: conf.ServiceName,
+ txtSuffix: conf.TXTSuffix,
+ cacheTime: conf.CacheTime,
+ }
+}
+
+// Check returns true if the request for the host should be blocked.
+func (c *Checker) Check(host string) (ok bool, err error) {
+ hashes := hostnameToHashes(host)
+
+ found, blocked, hashesToRequest := c.findInCache(hashes)
+ if found {
+ log.Debug("%s: found %q in cache, blocked: %t", c.svc, host, blocked)
+
+ return blocked, nil
+ }
+
+ question := c.getQuestion(hashesToRequest)
+
+ log.Debug("%s: checking %s: %s", c.svc, host, question)
+ req := (&dns.Msg{}).SetQuestion(question, dns.TypeTXT)
+
+ resp, err := c.upstream.Exchange(req)
+ if err != nil {
+ return false, fmt.Errorf("getting hashes: %w", err)
+ }
+
+ matched, receivedHashes := c.processAnswer(hashesToRequest, resp, host)
+
+ c.storeInCache(hashesToRequest, receivedHashes)
+
+ return matched, nil
+}
+
+// hostnameToHashes returns hashes that should be checked by the hash prefix
+// filter.
+func hostnameToHashes(host string) (hashes []hostnameHash) {
+ // subDomainNum defines how many labels should be hashed to match against a
+ // hash prefix filter.
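+	//
+	// For example, "1.2.3.sub.host.com" is truncated to "3.sub.host.com"
+	// before its subdomains are hashed.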
+ const subDomainNum = 4
+
+ pubSuf, icann := publicsuffix.PublicSuffix(host)
+ if !icann {
+ // Check the full private domain space.
+ pubSuf = ""
+ }
+
+ nDots := 0
+ i := strings.LastIndexFunc(host, func(r rune) (ok bool) {
+ if r == '.' {
+ nDots++
+ }
+
+ return nDots == subDomainNum
+ })
+ if i != -1 {
+ host = host[i+1:]
+ }
+
+ sub := netutil.Subdomains(host)
+
+ for _, s := range sub {
+ if s == pubSuf {
+ break
+ }
+
+ sum := sha256.Sum256([]byte(s))
+ hashes = append(hashes, sum)
+ }
+
+ return hashes
+}
+
+// getQuestion combines the hexadecimal-encoded prefixes of the hashed
+// hostnames into a question string.
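+//
+// For example, two hashes with prefixes 0xab 0xcd and 0x01 0x02 and the
+// suffix "sb.dns.adguard.com." produce "abcd.0102.sb.dns.adguard.com.".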
+func (c *Checker) getQuestion(hashes []hostnameHash) (q string) {
+ b := &strings.Builder{}
+
+ for _, hash := range hashes {
+		// nolint:looppointer // The subslice is used for safe hex encoding.
+ stringutil.WriteToBuilder(b, hex.EncodeToString(hash[:prefixLen]), ".")
+ }
+
+ stringutil.WriteToBuilder(b, c.txtSuffix)
+
+ return b.String()
+}
+
+// processAnswer returns true if the DNS response matches one of the hashes,
+// along with the hashed hostnames received from the upstream.
+func (c *Checker) processAnswer(
+ hashesToRequest []hostnameHash,
+ resp *dns.Msg,
+ host string,
+) (matched bool, receivedHashes []hostnameHash) {
+ txtCount := 0
+
+ for _, a := range resp.Answer {
+ txt, ok := a.(*dns.TXT)
+ if !ok {
+ continue
+ }
+
+ txtCount++
+
+ receivedHashes = c.appendHashesFromTXT(receivedHashes, txt, host)
+ }
+
+ log.Debug("%s: received answer for %s with %d TXT count", c.svc, host, txtCount)
+
+ matched = findMatch(hashesToRequest, receivedHashes)
+ if matched {
+ log.Debug("%s: matched %s", c.svc, host)
+
+ return true, receivedHashes
+ }
+
+ return false, receivedHashes
+}
+
+// appendHashesFromTXT appends the hashed hostnames parsed from the TXT record.
+func (c *Checker) appendHashesFromTXT(
+ hashes []hostnameHash,
+ txt *dns.TXT,
+ host string,
+) (receivedHashes []hostnameHash) {
+ log.Debug("%s: received hashes for %s: %v", c.svc, host, txt.Txt)
+
+ for _, t := range txt.Txt {
+ if len(t) != hexSize {
+ log.Debug("%s: wrong hex size %d for %s %s", c.svc, len(t), host, t)
+
+ continue
+ }
+
+ buf, err := hex.DecodeString(t)
+ if err != nil {
+ log.Debug("%s: decoding hex string %s: %s", c.svc, t, err)
+
+ continue
+ }
+
+ var hash hostnameHash
+ copy(hash[:], buf)
+ hashes = append(hashes, hash)
+ }
+
+ return hashes
+}
diff --git a/internal/filtering/hashprefix/hashprefix_internal_test.go b/internal/filtering/hashprefix/hashprefix_internal_test.go
new file mode 100644
index 00000000..7e724010
--- /dev/null
+++ b/internal/filtering/hashprefix/hashprefix_internal_test.go
@@ -0,0 +1,248 @@
+package hashprefix
+
+import (
+ "crypto/sha256"
+ "encoding/hex"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/AdguardTeam/AdGuardHome/internal/aghtest"
+ "github.com/AdguardTeam/golibs/cache"
+ "github.com/miekg/dns"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "golang.org/x/exp/slices"
+)
+
+const (
+ cacheTime = 10 * time.Minute
+ cacheSize = 10000
+)
+
+func TestChecker_getQuestion(t *testing.T) {
+ const suf = "sb.dns.adguard.com."
+
+ // test hostnameToHashes()
+ hashes := hostnameToHashes("1.2.3.sub.host.com")
+ assert.Len(t, hashes, 3)
+
+ hash := sha256.Sum256([]byte("3.sub.host.com"))
+ hexPref1 := hex.EncodeToString(hash[:prefixLen])
+ assert.True(t, slices.Contains(hashes, hash))
+
+ hash = sha256.Sum256([]byte("sub.host.com"))
+ hexPref2 := hex.EncodeToString(hash[:prefixLen])
+ assert.True(t, slices.Contains(hashes, hash))
+
+ hash = sha256.Sum256([]byte("host.com"))
+ hexPref3 := hex.EncodeToString(hash[:prefixLen])
+ assert.True(t, slices.Contains(hashes, hash))
+
+ hash = sha256.Sum256([]byte("com"))
+ assert.False(t, slices.Contains(hashes, hash))
+
+ c := &Checker{
+ svc: "SafeBrowsing",
+ txtSuffix: suf,
+ }
+
+ q := c.getQuestion(hashes)
+
+ assert.Contains(t, q, hexPref1)
+ assert.Contains(t, q, hexPref2)
+ assert.Contains(t, q, hexPref3)
+ assert.True(t, strings.HasSuffix(q, suf))
+}
+
+func TestHostnameToHashes(t *testing.T) {
+ testCases := []struct {
+ name string
+ host string
+ wantLen int
+ }{{
+ name: "basic",
+ host: "example.com",
+ wantLen: 1,
+ }, {
+ name: "sub_basic",
+ host: "www.example.com",
+ wantLen: 2,
+ }, {
+ name: "private_domain",
+ host: "foo.co.uk",
+ wantLen: 1,
+ }, {
+ name: "sub_private_domain",
+ host: "bar.foo.co.uk",
+ wantLen: 2,
+ }, {
+ name: "private_domain_v2",
+ host: "foo.blogspot.co.uk",
+ wantLen: 4,
+ }, {
+ name: "sub_private_domain_v2",
+ host: "bar.foo.blogspot.co.uk",
+ wantLen: 4,
+ }}
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ hashes := hostnameToHashes(tc.host)
+ assert.Len(t, hashes, tc.wantLen)
+ })
+ }
+}
+
+func TestChecker_storeInCache(t *testing.T) {
+ c := &Checker{
+ svc: "SafeBrowsing",
+ cacheTime: cacheTime,
+ }
+ conf := cache.Config{}
+ c.cache = cache.New(conf)
+
+	// Store hashes for "3.sub.host.com" and "host.com" in the cache, and store
+	// empty data for the hash prefix of "sub.host.com".
+ hashes := []hostnameHash{}
+ hash := sha256.Sum256([]byte("sub.host.com"))
+ hashes = append(hashes, hash)
+ var hashesArray []hostnameHash
+ hash4 := sha256.Sum256([]byte("3.sub.host.com"))
+ hashesArray = append(hashesArray, hash4)
+ hash2 := sha256.Sum256([]byte("host.com"))
+ hashesArray = append(hashesArray, hash2)
+ c.storeInCache(hashes, hashesArray)
+
+ // match "3.sub.host.com" or "host.com" from cache
+ hashes = []hostnameHash{}
+ hash = sha256.Sum256([]byte("3.sub.host.com"))
+ hashes = append(hashes, hash)
+ hash = sha256.Sum256([]byte("sub.host.com"))
+ hashes = append(hashes, hash)
+ hash = sha256.Sum256([]byte("host.com"))
+ hashes = append(hashes, hash)
+ found, blocked, _ := c.findInCache(hashes)
+ assert.True(t, found)
+ assert.True(t, blocked)
+
+ // match "sub.host.com" from cache
+ hashes = []hostnameHash{}
+ hash = sha256.Sum256([]byte("sub.host.com"))
+ hashes = append(hashes, hash)
+ found, blocked, _ = c.findInCache(hashes)
+ assert.True(t, found)
+ assert.False(t, blocked)
+
+ // Match "sub.host.com" from cache. Another hash for "host.example" is not
+ // in the cache, so get data for it from the server.
+ hashes = []hostnameHash{}
+ hash = sha256.Sum256([]byte("sub.host.com"))
+ hashes = append(hashes, hash)
+ hash = sha256.Sum256([]byte("host.example"))
+ hashes = append(hashes, hash)
+ found, _, hashesToRequest := c.findInCache(hashes)
+ assert.False(t, found)
+
+ hash = sha256.Sum256([]byte("sub.host.com"))
+ ok := slices.Contains(hashesToRequest, hash)
+ assert.False(t, ok)
+
+ hash = sha256.Sum256([]byte("host.example"))
+ ok = slices.Contains(hashesToRequest, hash)
+ assert.True(t, ok)
+
+ c = &Checker{
+ svc: "SafeBrowsing",
+ cacheTime: cacheTime,
+ }
+ c.cache = cache.New(cache.Config{})
+
+ hashes = []hostnameHash{}
+ hash = sha256.Sum256([]byte("sub.host.com"))
+ hashes = append(hashes, hash)
+
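+	// A zero-filled cache entry has an expired timestamp, so it should not be
+	// reported as found.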
+ c.cache.Set(hash[:prefixLen], make([]byte, expirySize+hashSize))
+ found, _, _ = c.findInCache(hashes)
+ assert.False(t, found)
+}
+
+func TestChecker_Check(t *testing.T) {
+ const hostname = "example.org"
+
+ testCases := []struct {
+ name string
+ wantBlock bool
+ }{{
+ name: "sb_no_block",
+ wantBlock: false,
+ }, {
+ name: "sb_block",
+ wantBlock: true,
+ }, {
+ name: "pc_no_block",
+ wantBlock: false,
+ }, {
+ name: "pc_block",
+ wantBlock: true,
+ }}
+
+ for _, tc := range testCases {
+ c := New(&Config{
+ CacheTime: cacheTime,
+ CacheSize: cacheSize,
+ })
+
+ // Prepare the upstream.
+ ups := aghtest.NewBlockUpstream(hostname, tc.wantBlock)
+
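+		// Wrap the exchange callback to count the requests to the upstream.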
+ var numReq int
+ onExchange := ups.OnExchange
+ ups.OnExchange = func(req *dns.Msg) (resp *dns.Msg, err error) {
+ numReq++
+
+ return onExchange(req)
+ }
+
+ c.upstream = ups
+
+ t.Run(tc.name, func(t *testing.T) {
+ // Firstly, check the request blocking.
+ hits := 0
+ res := false
+ res, err := c.Check(hostname)
+ require.NoError(t, err)
+
+ if tc.wantBlock {
+ assert.True(t, res)
+ hits++
+ } else {
+ require.False(t, res)
+ }
+
+			// Check the cache state; the response should now be cached.
+ assert.Equal(t, 1, c.cache.Stats().Count)
+ assert.Equal(t, hits, c.cache.Stats().Hit)
+
+ // There was one request to an upstream.
+ assert.Equal(t, 1, numReq)
+
+ // Now make the same request to check the cache was used.
+ res, err = c.Check(hostname)
+ require.NoError(t, err)
+
+ if tc.wantBlock {
+ assert.True(t, res)
+ } else {
+ require.False(t, res)
+ }
+
+			// Check the cache state; it should have been used.
+ assert.Equal(t, 1, c.cache.Stats().Count)
+ assert.Equal(t, hits+1, c.cache.Stats().Hit)
+
+ // Check that there were no additional requests.
+ assert.Equal(t, 1, numReq)
+ })
+ }
+}
diff --git a/internal/filtering/http.go b/internal/filtering/http.go
index 11f64283..e114be33 100644
--- a/internal/filtering/http.go
+++ b/internal/filtering/http.go
@@ -8,6 +8,7 @@ import (
"net/url"
"os"
"path/filepath"
+ "sync"
"time"
"github.com/AdguardTeam/AdGuardHome/internal/aghhttp"
@@ -458,6 +459,80 @@ func (d *DNSFilter) handleCheckHost(w http.ResponseWriter, r *http.Request) {
_ = aghhttp.WriteJSONResponse(w, r, resp)
}
+// setProtectedBool sets the value of a boolean pointer under a lock. mu must
+// protect the value under ptr.
+//
+// TODO(e.burkov): Make it generic?
+func setProtectedBool(mu *sync.RWMutex, ptr *bool, val bool) {
+ mu.Lock()
+ defer mu.Unlock()
+
+ *ptr = val
+}
+
+// protectedBool gets the value of a boolean pointer under a read lock. mu
+// must protect the value under ptr.
+//
+// TODO(e.burkov): Make it generic?
+func protectedBool(mu *sync.RWMutex, ptr *bool) (val bool) {
+ mu.RLock()
+ defer mu.RUnlock()
+
+ return *ptr
+}
+
+// handleSafeBrowsingEnable is the handler for the POST
+// /control/safebrowsing/enable HTTP API.
+func (d *DNSFilter) handleSafeBrowsingEnable(w http.ResponseWriter, r *http.Request) {
+ setProtectedBool(&d.confLock, &d.Config.SafeBrowsingEnabled, true)
+ d.Config.ConfigModified()
+}
+
+// handleSafeBrowsingDisable is the handler for the POST
+// /control/safebrowsing/disable HTTP API.
+func (d *DNSFilter) handleSafeBrowsingDisable(w http.ResponseWriter, r *http.Request) {
+ setProtectedBool(&d.confLock, &d.Config.SafeBrowsingEnabled, false)
+ d.Config.ConfigModified()
+}
+
+// handleSafeBrowsingStatus is the handler for the GET
+// /control/safebrowsing/status HTTP API.
+func (d *DNSFilter) handleSafeBrowsingStatus(w http.ResponseWriter, r *http.Request) {
+ resp := &struct {
+ Enabled bool `json:"enabled"`
+ }{
+ Enabled: protectedBool(&d.confLock, &d.Config.SafeBrowsingEnabled),
+ }
+
+ _ = aghhttp.WriteJSONResponse(w, r, resp)
+}
+
+// handleParentalEnable is the handler for the POST /control/parental/enable
+// HTTP API.
+func (d *DNSFilter) handleParentalEnable(w http.ResponseWriter, r *http.Request) {
+ setProtectedBool(&d.confLock, &d.Config.ParentalEnabled, true)
+ d.Config.ConfigModified()
+}
+
+// handleParentalDisable is the handler for the POST /control/parental/disable
+// HTTP API.
+func (d *DNSFilter) handleParentalDisable(w http.ResponseWriter, r *http.Request) {
+ setProtectedBool(&d.confLock, &d.Config.ParentalEnabled, false)
+ d.Config.ConfigModified()
+}
+
+// handleParentalStatus is the handler for the GET /control/parental/status
+// HTTP API.
+func (d *DNSFilter) handleParentalStatus(w http.ResponseWriter, r *http.Request) {
+ resp := &struct {
+ Enabled bool `json:"enabled"`
+ }{
+ Enabled: protectedBool(&d.confLock, &d.Config.ParentalEnabled),
+ }
+
+ _ = aghhttp.WriteJSONResponse(w, r, resp)
+}
+
// RegisterFilteringHandlers - register handlers
func (d *DNSFilter) RegisterFilteringHandlers() {
registerHTTP := d.HTTPRegister
diff --git a/internal/filtering/http_test.go b/internal/filtering/http_test.go
index df09c3f9..8330dac6 100644
--- a/internal/filtering/http_test.go
+++ b/internal/filtering/http_test.go
@@ -8,6 +8,7 @@ import (
"testing"
"time"
+ "github.com/AdguardTeam/golibs/testutil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -136,3 +137,171 @@ func TestDNSFilter_handleFilteringSetURL(t *testing.T) {
})
}
}
+
+func TestDNSFilter_handleSafeBrowsingStatus(t *testing.T) {
+ const (
+ testTimeout = time.Second
+ statusURL = "/control/safebrowsing/status"
+ )
+
+ confModCh := make(chan struct{})
+ filtersDir := t.TempDir()
+
+ testCases := []struct {
+ name string
+ url string
+ enabled bool
+ wantStatus assert.BoolAssertionFunc
+ }{{
+ name: "enable_off",
+ url: "/control/safebrowsing/enable",
+ enabled: false,
+ wantStatus: assert.True,
+ }, {
+ name: "enable_on",
+ url: "/control/safebrowsing/enable",
+ enabled: true,
+ wantStatus: assert.True,
+ }, {
+ name: "disable_on",
+ url: "/control/safebrowsing/disable",
+ enabled: true,
+ wantStatus: assert.False,
+ }, {
+ name: "disable_off",
+ url: "/control/safebrowsing/disable",
+ enabled: false,
+ wantStatus: assert.False,
+ }}
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ handlers := make(map[string]http.Handler)
+
+ d, err := New(&Config{
+ ConfigModified: func() {
+ testutil.RequireSend(testutil.PanicT{}, confModCh, struct{}{}, testTimeout)
+ },
+ DataDir: filtersDir,
+ HTTPRegister: func(_, url string, handler http.HandlerFunc) {
+ handlers[url] = handler
+ },
+ SafeBrowsingEnabled: tc.enabled,
+ }, nil)
+ require.NoError(t, err)
+ t.Cleanup(d.Close)
+
+ d.RegisterFilteringHandlers()
+ require.NotEmpty(t, handlers)
+ require.Contains(t, handlers, statusURL)
+
+ r := httptest.NewRequest(http.MethodPost, tc.url, nil)
+ w := httptest.NewRecorder()
+
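+			// Serve the request in a goroutine, since ConfigModified blocks
+			// until confModCh is read below.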
+ go handlers[tc.url].ServeHTTP(w, r)
+
+ testutil.RequireReceive(t, confModCh, testTimeout)
+
+ r = httptest.NewRequest(http.MethodGet, statusURL, nil)
+ w = httptest.NewRecorder()
+
+ handlers[statusURL].ServeHTTP(w, r)
+ require.Equal(t, http.StatusOK, w.Code)
+
+ status := struct {
+ Enabled bool `json:"enabled"`
+ }{
+ Enabled: false,
+ }
+
+ err = json.NewDecoder(w.Body).Decode(&status)
+ require.NoError(t, err)
+
+ tc.wantStatus(t, status.Enabled)
+ })
+ }
+}
+
+func TestDNSFilter_handleParentalStatus(t *testing.T) {
+ const (
+ testTimeout = time.Second
+ statusURL = "/control/parental/status"
+ )
+
+ confModCh := make(chan struct{})
+ filtersDir := t.TempDir()
+
+ testCases := []struct {
+ name string
+ url string
+ enabled bool
+ wantStatus assert.BoolAssertionFunc
+ }{{
+ name: "enable_off",
+ url: "/control/parental/enable",
+ enabled: false,
+ wantStatus: assert.True,
+ }, {
+ name: "enable_on",
+ url: "/control/parental/enable",
+ enabled: true,
+ wantStatus: assert.True,
+ }, {
+ name: "disable_on",
+ url: "/control/parental/disable",
+ enabled: true,
+ wantStatus: assert.False,
+ }, {
+ name: "disable_off",
+ url: "/control/parental/disable",
+ enabled: false,
+ wantStatus: assert.False,
+ }}
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ handlers := make(map[string]http.Handler)
+
+ d, err := New(&Config{
+ ConfigModified: func() {
+ testutil.RequireSend(testutil.PanicT{}, confModCh, struct{}{}, testTimeout)
+ },
+ DataDir: filtersDir,
+ HTTPRegister: func(_, url string, handler http.HandlerFunc) {
+ handlers[url] = handler
+ },
+ ParentalEnabled: tc.enabled,
+ }, nil)
+ require.NoError(t, err)
+ t.Cleanup(d.Close)
+
+ d.RegisterFilteringHandlers()
+ require.NotEmpty(t, handlers)
+ require.Contains(t, handlers, statusURL)
+
+ r := httptest.NewRequest(http.MethodPost, tc.url, nil)
+ w := httptest.NewRecorder()
+
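+			// Serve the request in a goroutine, since ConfigModified blocks
+			// until confModCh is read below.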
+ go handlers[tc.url].ServeHTTP(w, r)
+
+ testutil.RequireReceive(t, confModCh, testTimeout)
+
+ r = httptest.NewRequest(http.MethodGet, statusURL, nil)
+ w = httptest.NewRecorder()
+
+ handlers[statusURL].ServeHTTP(w, r)
+ require.Equal(t, http.StatusOK, w.Code)
+
+ status := struct {
+ Enabled bool `json:"enabled"`
+ }{
+ Enabled: false,
+ }
+
+ err = json.NewDecoder(w.Body).Decode(&status)
+ require.NoError(t, err)
+
+ tc.wantStatus(t, status.Enabled)
+ })
+ }
+}
diff --git a/internal/filtering/safebrowsing.go b/internal/filtering/safebrowsing.go
deleted file mode 100644
index 3fb814d7..00000000
--- a/internal/filtering/safebrowsing.go
+++ /dev/null
@@ -1,433 +0,0 @@
-package filtering
-
-import (
- "bytes"
- "crypto/sha256"
- "encoding/binary"
- "encoding/hex"
- "fmt"
- "net"
- "net/http"
- "strings"
- "sync"
- "time"
-
- "github.com/AdguardTeam/AdGuardHome/internal/aghhttp"
- "github.com/AdguardTeam/dnsproxy/upstream"
- "github.com/AdguardTeam/golibs/cache"
- "github.com/AdguardTeam/golibs/log"
- "github.com/AdguardTeam/golibs/stringutil"
- "github.com/miekg/dns"
- "golang.org/x/exp/slices"
- "golang.org/x/net/publicsuffix"
-)
-
-// Safe browsing and parental control methods.
-
-// TODO(a.garipov): Make configurable.
-const (
- dnsTimeout = 3 * time.Second
- defaultSafebrowsingServer = `https://family.adguard-dns.com/dns-query`
- defaultParentalServer = `https://family.adguard-dns.com/dns-query`
- sbTXTSuffix = `sb.dns.adguard.com.`
- pcTXTSuffix = `pc.dns.adguard.com.`
-)
-
-// SetParentalUpstream sets the parental upstream for *DNSFilter.
-//
-// TODO(e.burkov): Remove this in v1 API to forbid the direct access.
-func (d *DNSFilter) SetParentalUpstream(u upstream.Upstream) {
- d.parentalUpstream = u
-}
-
-// SetSafeBrowsingUpstream sets the safe browsing upstream for *DNSFilter.
-//
-// TODO(e.burkov): Remove this in v1 API to forbid the direct access.
-func (d *DNSFilter) SetSafeBrowsingUpstream(u upstream.Upstream) {
- d.safeBrowsingUpstream = u
-}
-
-func (d *DNSFilter) initSecurityServices() error {
- var err error
- d.safeBrowsingServer = defaultSafebrowsingServer
- d.parentalServer = defaultParentalServer
- opts := &upstream.Options{
- Timeout: dnsTimeout,
- ServerIPAddrs: []net.IP{
- {94, 140, 14, 15},
- {94, 140, 15, 16},
- net.ParseIP("2a10:50c0::bad1:ff"),
- net.ParseIP("2a10:50c0::bad2:ff"),
- },
- }
-
- parUps, err := upstream.AddressToUpstream(d.parentalServer, opts)
- if err != nil {
- return fmt.Errorf("converting parental server: %w", err)
- }
- d.SetParentalUpstream(parUps)
-
- sbUps, err := upstream.AddressToUpstream(d.safeBrowsingServer, opts)
- if err != nil {
- return fmt.Errorf("converting safe browsing server: %w", err)
- }
- d.SetSafeBrowsingUpstream(sbUps)
-
- return nil
-}
-
-/*
-expire byte[4]
-hash byte[32]
-...
-*/
-func (c *sbCtx) setCache(prefix, hashes []byte) {
- d := make([]byte, 4+len(hashes))
- expire := uint(time.Now().Unix()) + c.cacheTime*60
- binary.BigEndian.PutUint32(d[:4], uint32(expire))
- copy(d[4:], hashes)
- c.cache.Set(prefix, d)
- log.Debug("%s: stored in cache: %v", c.svc, prefix)
-}
-
-// findInHash returns 32-byte hash if it's found in hashToHost.
-func (c *sbCtx) findInHash(val []byte) (hash32 [32]byte, found bool) {
- for i := 4; i < len(val); i += 32 {
- hash := val[i : i+32]
-
- copy(hash32[:], hash[0:32])
-
- _, found = c.hashToHost[hash32]
- if found {
- return hash32, found
- }
- }
-
- return [32]byte{}, false
-}
-
-func (c *sbCtx) getCached() int {
- now := time.Now().Unix()
- hashesToRequest := map[[32]byte]string{}
- for k, v := range c.hashToHost {
- // nolint:looppointer // The subsilce is used for a safe cache lookup.
- val := c.cache.Get(k[0:2])
- if val == nil || now >= int64(binary.BigEndian.Uint32(val)) {
- hashesToRequest[k] = v
- continue
- }
- if hash32, found := c.findInHash(val); found {
- log.Debug("%s: found in cache: %s: blocked by %v", c.svc, c.host, hash32)
- return 1
- }
- }
-
- if len(hashesToRequest) == 0 {
- log.Debug("%s: found in cache: %s: not blocked", c.svc, c.host)
- return -1
- }
-
- c.hashToHost = hashesToRequest
- return 0
-}
-
-type sbCtx struct {
- host string
- svc string
- hashToHost map[[32]byte]string
- cache cache.Cache
- cacheTime uint
-}
-
-func hostnameToHashes(host string) map[[32]byte]string {
- hashes := map[[32]byte]string{}
- tld, icann := publicsuffix.PublicSuffix(host)
- if !icann {
- // private suffixes like cloudfront.net
- tld = ""
- }
- curhost := host
-
- nDots := 0
- for i := len(curhost) - 1; i >= 0; i-- {
- if curhost[i] == '.' {
- nDots++
- if nDots == 4 {
- curhost = curhost[i+1:] // "xxx.a.b.c.d" -> "a.b.c.d"
- break
- }
- }
- }
-
- for {
- if curhost == "" {
- // we've reached end of string
- break
- }
- if tld != "" && curhost == tld {
- // we've reached the TLD, don't hash it
- break
- }
-
- sum := sha256.Sum256([]byte(curhost))
- hashes[sum] = curhost
-
- pos := strings.IndexByte(curhost, byte('.'))
- if pos < 0 {
- break
- }
- curhost = curhost[pos+1:]
- }
- return hashes
-}
-
-// convert hash array to string
-func (c *sbCtx) getQuestion() string {
- b := &strings.Builder{}
-
- for hash := range c.hashToHost {
- // nolint:looppointer // The subsilce is used for safe hex encoding.
- stringutil.WriteToBuilder(b, hex.EncodeToString(hash[0:2]), ".")
- }
-
- if c.svc == "SafeBrowsing" {
- stringutil.WriteToBuilder(b, sbTXTSuffix)
-
- return b.String()
- }
-
- stringutil.WriteToBuilder(b, pcTXTSuffix)
-
- return b.String()
-}
-
-// Find the target hash in TXT response
-func (c *sbCtx) processTXT(resp *dns.Msg) (bool, [][]byte) {
- matched := false
- hashes := [][]byte{}
- for _, a := range resp.Answer {
- txt, ok := a.(*dns.TXT)
- if !ok {
- continue
- }
- log.Debug("%s: received hashes for %s: %v", c.svc, c.host, txt.Txt)
-
- for _, t := range txt.Txt {
- if len(t) != 32*2 {
- continue
- }
- hash, err := hex.DecodeString(t)
- if err != nil {
- continue
- }
-
- hashes = append(hashes, hash)
-
- if !matched {
- var hash32 [32]byte
- copy(hash32[:], hash)
-
- var hashHost string
- hashHost, ok = c.hashToHost[hash32]
- if ok {
- log.Debug("%s: matched %s by %s/%s", c.svc, c.host, hashHost, t)
- matched = true
- }
- }
- }
- }
-
- return matched, hashes
-}
-
-func (c *sbCtx) storeCache(hashes [][]byte) {
- slices.SortFunc(hashes, func(a, b []byte) (sortsBefore bool) {
- return bytes.Compare(a, b) == -1
- })
-
- var curData []byte
- var prevPrefix []byte
- for i, hash := range hashes {
- // nolint:looppointer // The subsilce is used for a safe comparison.
- if !bytes.Equal(hash[0:2], prevPrefix) {
- if i != 0 {
- c.setCache(prevPrefix, curData)
- curData = nil
- }
- prevPrefix = hashes[i][0:2]
- }
- curData = append(curData, hash...)
- }
-
- if len(prevPrefix) != 0 {
- c.setCache(prevPrefix, curData)
- }
-
- for hash := range c.hashToHost {
- // nolint:looppointer // The subsilce is used for a safe cache lookup.
- prefix := hash[0:2]
- val := c.cache.Get(prefix)
- if val == nil {
- c.setCache(prefix, nil)
- }
- }
-}
-
-func check(c *sbCtx, r Result, u upstream.Upstream) (Result, error) {
- c.hashToHost = hostnameToHashes(c.host)
- switch c.getCached() {
- case -1:
- return Result{}, nil
- case 1:
- return r, nil
- }
-
- question := c.getQuestion()
-
- log.Tracef("%s: checking %s: %s", c.svc, c.host, question)
- req := (&dns.Msg{}).SetQuestion(question, dns.TypeTXT)
-
- resp, err := u.Exchange(req)
- if err != nil {
- return Result{}, err
- }
-
- matched, receivedHashes := c.processTXT(resp)
-
- c.storeCache(receivedHashes)
- if matched {
- return r, nil
- }
-
- return Result{}, nil
-}
-
-// TODO(a.garipov): Unify with checkParental.
-func (d *DNSFilter) checkSafeBrowsing(
- host string,
- _ uint16,
- setts *Settings,
-) (res Result, err error) {
- if !setts.ProtectionEnabled || !setts.SafeBrowsingEnabled {
- return Result{}, nil
- }
-
- if log.GetLevel() >= log.DEBUG {
- timer := log.StartTimer()
- defer timer.LogElapsed("safebrowsing lookup for %q", host)
- }
-
- sctx := &sbCtx{
- host: host,
- svc: "SafeBrowsing",
- cache: d.safebrowsingCache,
- cacheTime: d.Config.CacheTime,
- }
-
- res = Result{
- Rules: []*ResultRule{{
- Text: "adguard-malware-shavar",
- FilterListID: SafeBrowsingListID,
- }},
- Reason: FilteredSafeBrowsing,
- IsFiltered: true,
- }
-
- return check(sctx, res, d.safeBrowsingUpstream)
-}
-
-// TODO(a.garipov): Unify with checkSafeBrowsing.
-func (d *DNSFilter) checkParental(
- host string,
- _ uint16,
- setts *Settings,
-) (res Result, err error) {
- if !setts.ProtectionEnabled || !setts.ParentalEnabled {
- return Result{}, nil
- }
-
- if log.GetLevel() >= log.DEBUG {
- timer := log.StartTimer()
- defer timer.LogElapsed("parental lookup for %q", host)
- }
-
- sctx := &sbCtx{
- host: host,
- svc: "Parental",
- cache: d.parentalCache,
- cacheTime: d.Config.CacheTime,
- }
-
- res = Result{
- Rules: []*ResultRule{{
- Text: "parental CATEGORY_BLACKLISTED",
- FilterListID: ParentalListID,
- }},
- Reason: FilteredParental,
- IsFiltered: true,
- }
-
- return check(sctx, res, d.parentalUpstream)
-}
-
-// setProtectedBool sets the value of a boolean pointer under a lock. l must
-// protect the value under ptr.
-//
-// TODO(e.burkov): Make it generic?
-func setProtectedBool(mu *sync.RWMutex, ptr *bool, val bool) {
- mu.Lock()
- defer mu.Unlock()
-
- *ptr = val
-}
-
-// protectedBool gets the value of a boolean pointer under a read lock. l must
-// protect the value under ptr.
-//
-// TODO(e.burkov): Make it generic?
-func protectedBool(mu *sync.RWMutex, ptr *bool) (val bool) {
- mu.RLock()
- defer mu.RUnlock()
-
- return *ptr
-}
-
-func (d *DNSFilter) handleSafeBrowsingEnable(w http.ResponseWriter, r *http.Request) {
- setProtectedBool(&d.confLock, &d.Config.SafeBrowsingEnabled, true)
- d.Config.ConfigModified()
-}
-
-func (d *DNSFilter) handleSafeBrowsingDisable(w http.ResponseWriter, r *http.Request) {
- setProtectedBool(&d.confLock, &d.Config.SafeBrowsingEnabled, false)
- d.Config.ConfigModified()
-}
-
-func (d *DNSFilter) handleSafeBrowsingStatus(w http.ResponseWriter, r *http.Request) {
- resp := &struct {
- Enabled bool `json:"enabled"`
- }{
- Enabled: protectedBool(&d.confLock, &d.Config.SafeBrowsingEnabled),
- }
-
- _ = aghhttp.WriteJSONResponse(w, r, resp)
-}
-
-func (d *DNSFilter) handleParentalEnable(w http.ResponseWriter, r *http.Request) {
- setProtectedBool(&d.confLock, &d.Config.ParentalEnabled, true)
- d.Config.ConfigModified()
-}
-
-func (d *DNSFilter) handleParentalDisable(w http.ResponseWriter, r *http.Request) {
- setProtectedBool(&d.confLock, &d.Config.ParentalEnabled, false)
- d.Config.ConfigModified()
-}
-
-func (d *DNSFilter) handleParentalStatus(w http.ResponseWriter, r *http.Request) {
- resp := &struct {
- Enabled bool `json:"enabled"`
- }{
- Enabled: protectedBool(&d.confLock, &d.Config.ParentalEnabled),
- }
-
- _ = aghhttp.WriteJSONResponse(w, r, resp)
-}
diff --git a/internal/filtering/safebrowsing_test.go b/internal/filtering/safebrowsing_test.go
deleted file mode 100644
index a7abf878..00000000
--- a/internal/filtering/safebrowsing_test.go
+++ /dev/null
@@ -1,226 +0,0 @@
-package filtering
-
-import (
- "crypto/sha256"
- "strings"
- "testing"
-
- "github.com/AdguardTeam/AdGuardHome/internal/aghtest"
- "github.com/AdguardTeam/golibs/cache"
- "github.com/miekg/dns"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-)
-
-func TestSafeBrowsingHash(t *testing.T) {
- // test hostnameToHashes()
- hashes := hostnameToHashes("1.2.3.sub.host.com")
- assert.Len(t, hashes, 3)
- _, ok := hashes[sha256.Sum256([]byte("3.sub.host.com"))]
- assert.True(t, ok)
- _, ok = hashes[sha256.Sum256([]byte("sub.host.com"))]
- assert.True(t, ok)
- _, ok = hashes[sha256.Sum256([]byte("host.com"))]
- assert.True(t, ok)
- _, ok = hashes[sha256.Sum256([]byte("com"))]
- assert.False(t, ok)
-
- c := &sbCtx{
- svc: "SafeBrowsing",
- hashToHost: hashes,
- }
-
- q := c.getQuestion()
-
- assert.Contains(t, q, "7a1b.")
- assert.Contains(t, q, "af5a.")
- assert.Contains(t, q, "eb11.")
- assert.True(t, strings.HasSuffix(q, "sb.dns.adguard.com."))
-}
-
-func TestSafeBrowsingCache(t *testing.T) {
- c := &sbCtx{
- svc: "SafeBrowsing",
- cacheTime: 100,
- }
- conf := cache.Config{}
- c.cache = cache.New(conf)
-
- // store in cache hashes for "3.sub.host.com" and "host.com"
- // and empty data for hash-prefix for "sub.host.com"
- hash := sha256.Sum256([]byte("sub.host.com"))
- c.hashToHost = make(map[[32]byte]string)
- c.hashToHost[hash] = "sub.host.com"
- var hashesArray [][]byte
- hash4 := sha256.Sum256([]byte("3.sub.host.com"))
- hashesArray = append(hashesArray, hash4[:])
- hash2 := sha256.Sum256([]byte("host.com"))
- hashesArray = append(hashesArray, hash2[:])
- c.storeCache(hashesArray)
-
- // match "3.sub.host.com" or "host.com" from cache
- c.hashToHost = make(map[[32]byte]string)
- hash = sha256.Sum256([]byte("3.sub.host.com"))
- c.hashToHost[hash] = "3.sub.host.com"
- hash = sha256.Sum256([]byte("sub.host.com"))
- c.hashToHost[hash] = "sub.host.com"
- hash = sha256.Sum256([]byte("host.com"))
- c.hashToHost[hash] = "host.com"
- assert.Equal(t, 1, c.getCached())
-
- // match "sub.host.com" from cache
- c.hashToHost = make(map[[32]byte]string)
- hash = sha256.Sum256([]byte("sub.host.com"))
- c.hashToHost[hash] = "sub.host.com"
- assert.Equal(t, -1, c.getCached())
-
- // Match "sub.host.com" from cache. Another hash for "host.example" is not
- // in the cache, so get data for it from the server.
- c.hashToHost = make(map[[32]byte]string)
- hash = sha256.Sum256([]byte("sub.host.com"))
- c.hashToHost[hash] = "sub.host.com"
- hash = sha256.Sum256([]byte("host.example"))
- c.hashToHost[hash] = "host.example"
- assert.Empty(t, c.getCached())
-
- hash = sha256.Sum256([]byte("sub.host.com"))
- _, ok := c.hashToHost[hash]
- assert.False(t, ok)
-
- hash = sha256.Sum256([]byte("host.example"))
- _, ok = c.hashToHost[hash]
- assert.True(t, ok)
-
- c = &sbCtx{
- svc: "SafeBrowsing",
- cacheTime: 100,
- }
- conf = cache.Config{}
- c.cache = cache.New(conf)
-
- hash = sha256.Sum256([]byte("sub.host.com"))
- c.hashToHost = make(map[[32]byte]string)
- c.hashToHost[hash] = "sub.host.com"
-
- c.cache.Set(hash[0:2], make([]byte, 32))
- assert.Empty(t, c.getCached())
-}
-
-func TestSBPC_checkErrorUpstream(t *testing.T) {
- d, _ := newForTest(t, &Config{SafeBrowsingEnabled: true}, nil)
- t.Cleanup(d.Close)
-
- ups := aghtest.NewErrorUpstream()
- d.SetSafeBrowsingUpstream(ups)
- d.SetParentalUpstream(ups)
-
- setts := &Settings{
- ProtectionEnabled: true,
- SafeBrowsingEnabled: true,
- ParentalEnabled: true,
- }
-
- _, err := d.checkSafeBrowsing("smthng.com", dns.TypeA, setts)
- assert.Error(t, err)
-
- _, err = d.checkParental("smthng.com", dns.TypeA, setts)
- assert.Error(t, err)
-}
-
-func TestSBPC(t *testing.T) {
- d, _ := newForTest(t, &Config{SafeBrowsingEnabled: true}, nil)
- t.Cleanup(d.Close)
-
- const hostname = "example.org"
-
- setts := &Settings{
- ProtectionEnabled: true,
- SafeBrowsingEnabled: true,
- ParentalEnabled: true,
- }
-
- testCases := []struct {
- testCache cache.Cache
- testFunc func(host string, _ uint16, _ *Settings) (res Result, err error)
- name string
- block bool
- }{{
- testCache: d.safebrowsingCache,
- testFunc: d.checkSafeBrowsing,
- name: "sb_no_block",
- block: false,
- }, {
- testCache: d.safebrowsingCache,
- testFunc: d.checkSafeBrowsing,
- name: "sb_block",
- block: true,
- }, {
- testCache: d.parentalCache,
- testFunc: d.checkParental,
- name: "pc_no_block",
- block: false,
- }, {
- testCache: d.parentalCache,
- testFunc: d.checkParental,
- name: "pc_block",
- block: true,
- }}
-
- for _, tc := range testCases {
- // Prepare the upstream.
- ups := aghtest.NewBlockUpstream(hostname, tc.block)
-
- var numReq int
- onExchange := ups.OnExchange
- ups.OnExchange = func(req *dns.Msg) (resp *dns.Msg, err error) {
- numReq++
-
- return onExchange(req)
- }
-
- d.SetSafeBrowsingUpstream(ups)
- d.SetParentalUpstream(ups)
-
- t.Run(tc.name, func(t *testing.T) {
- // Firstly, check the request blocking.
- hits := 0
- res, err := tc.testFunc(hostname, dns.TypeA, setts)
- require.NoError(t, err)
-
- if tc.block {
- assert.True(t, res.IsFiltered)
- require.Len(t, res.Rules, 1)
- hits++
- } else {
- require.False(t, res.IsFiltered)
- }
-
- // Check the cache state, check the response is now cached.
- assert.Equal(t, 1, tc.testCache.Stats().Count)
- assert.Equal(t, hits, tc.testCache.Stats().Hit)
-
- // There was one request to an upstream.
- assert.Equal(t, 1, numReq)
-
- // Now make the same request to check the cache was used.
- res, err = tc.testFunc(hostname, dns.TypeA, setts)
- require.NoError(t, err)
-
- if tc.block {
- assert.True(t, res.IsFiltered)
- require.Len(t, res.Rules, 1)
- } else {
- require.False(t, res.IsFiltered)
- }
-
- // Check the cache state, it should've been used.
- assert.Equal(t, 1, tc.testCache.Stats().Count)
- assert.Equal(t, hits+1, tc.testCache.Stats().Hit)
-
- // Check that there were no additional requests.
- assert.Equal(t, 1, numReq)
- })
-
- purgeCaches(d)
- }
-}
diff --git a/internal/filtering/servicelist.go b/internal/filtering/servicelist.go
index 7cf33bfc..8bff3638 100644
--- a/internal/filtering/servicelist.go
+++ b/internal/filtering/servicelist.go
@@ -12,6 +12,14 @@ type blockedService struct {
// blockedServices contains raw blocked service data.
var blockedServices = []blockedService{{
+ ID: "500px",
+ Name: "500px",
+ IconSVG: []byte(""),
+ Rules: []string{
+ "||500px.com^",
+ "||500px.org^",
+ },
+}, {
ID: "9gag",
Name: "9GAG",
IconSVG: []byte(""),
@@ -1180,6 +1188,18 @@ var blockedServices = []blockedService{{
"||zuckerberg.com^",
"||zuckerberg.net^",
},
+}, {
+ ID: "flickr",
+ Name: "Flickr",
+ IconSVG: []byte(""),
+ Rules: []string{
+ "||flic.kr^",
+ "||flickr.com^",
+ "||flickr.net^",
+ "||flickrprints.com^",
+ "||flickrpro.com^",
+ "||staticflickr.com^",
+ },
}, {
ID: "gog",
Name: "GOG",
@@ -1325,6 +1345,13 @@ var blockedServices = []blockedService{{
"||kakao.com^",
"||kgslb.com^",
},
+}, {
+ ID: "kik",
+ Name: "Kik",
+ IconSVG: []byte(""),
+ Rules: []string{
+ "||kik.com^",
+ },
}, {
ID: "lazada",
Name: "Lazada",
@@ -1385,6 +1412,7 @@ var blockedServices = []blockedService{{
Rules: []string{
"||aus.social^",
"||awscommunity.social^",
+ "||climatejustice.social^",
"||cyberplace.social^",
"||defcon.social^",
"||det.social^",
@@ -1442,13 +1470,13 @@ var blockedServices = []blockedService{{
"||mstdn.plus^",
"||mstdn.social^",
"||muenchen.social^",
- "||muenster.im^",
"||newsie.social^",
"||noc.social^",
"||norden.social^",
"||nrw.social^",
"||o3o.ca^",
"||ohai.social^",
+ "||pewtix.com^",
"||piaille.fr^",
"||pol.social^",
"||ravenation.club^",
@@ -1480,7 +1508,6 @@ var blockedServices = []blockedService{{
"||union.place^",
"||universeodon.com^",
"||urbanists.social^",
- "||wien.rocks^",
"||wxw.moe^",
},
}, {
@@ -1827,6 +1854,13 @@ var blockedServices = []blockedService{{
"||tx.me^",
"||usercontent.dev^",
},
+}, {
+ ID: "tidal",
+ Name: "Tidal",
+ IconSVG: []byte(""),
+ Rules: []string{
+ "||tidal.com^",
+ },
}, {
ID: "tiktok",
Name: "TikTok",
diff --git a/internal/home/client.go b/internal/home/client.go
index 5e56df19..1aee021e 100644
--- a/internal/home/client.go
+++ b/internal/home/client.go
@@ -8,6 +8,7 @@ import (
"github.com/AdguardTeam/AdGuardHome/internal/filtering"
"github.com/AdguardTeam/AdGuardHome/internal/filtering/safesearch"
"github.com/AdguardTeam/dnsproxy/proxy"
+ "github.com/AdguardTeam/golibs/stringutil"
)
// Client contains information about persistent clients.
@@ -37,6 +38,19 @@ type Client struct {
IgnoreStatistics bool
}
+// ShallowClone returns a copy of the client, except for the upstreamConfig,
+// safeSearchConf, and SafeSearch fields, since they are difficult to copy.
+func (c *Client) ShallowClone() (sh *Client) {
+ clone := *c
+
+ clone.IDs = stringutil.CloneSlice(c.IDs)
+ clone.Tags = stringutil.CloneSlice(c.Tags)
+ clone.BlockedServices = stringutil.CloneSlice(c.BlockedServices)
+ clone.Upstreams = stringutil.CloneSlice(c.Upstreams)
+
+ return &clone
+}
+
// closeUpstreams closes the client-specific upstream config of c if any.
func (c *Client) closeUpstreams() (err error) {
if c.upstreamConfig != nil {
diff --git a/internal/home/clients.go b/internal/home/clients.go
index 1ea0247a..d2e4194b 100644
--- a/internal/home/clients.go
+++ b/internal/home/clients.go
@@ -378,6 +378,7 @@ func (clients *clientsContainer) clientOrArtificial(
}, true
}
+// Find returns a shallow clone of the client, if one is found.
func (clients *clientsContainer) Find(id string) (c *Client, ok bool) {
clients.lock.Lock()
defer clients.lock.Unlock()
@@ -387,20 +388,18 @@ func (clients *clientsContainer) Find(id string) (c *Client, ok bool) {
return nil, false
}
- c.IDs = stringutil.CloneSlice(c.IDs)
- c.Tags = stringutil.CloneSlice(c.Tags)
- c.BlockedServices = stringutil.CloneSlice(c.BlockedServices)
- c.Upstreams = stringutil.CloneSlice(c.Upstreams)
-
- return c, true
+ return c.ShallowClone(), true
}
// shouldCountClient is a wrapper around Find to make it a valid client
// information finder for the statistics. If no information about the client
// is found, it returns true.
func (clients *clientsContainer) shouldCountClient(ids []string) (y bool) {
+ clients.lock.Lock()
+ defer clients.lock.Unlock()
+
for _, id := range ids {
- client, ok := clients.Find(id)
+ client, ok := clients.findLocked(id)
if ok {
return !client.IgnoreStatistics
}
@@ -617,6 +616,15 @@ func (clients *clientsContainer) Add(c *Client) (ok bool, err error) {
}
}
+ clients.add(c)
+
+ log.Debug("clients: added %q: ID:%q [%d]", c.Name, c.IDs, len(clients.list))
+
+ return true, nil
+}
+
+// add adds c to the indexes. clients.lock is expected to be locked.
+func (clients *clientsContainer) add(c *Client) {
// update Name index
clients.list[c.Name] = c
@@ -624,10 +632,6 @@ func (clients *clientsContainer) Add(c *Client) (ok bool, err error) {
for _, id := range c.IDs {
clients.idIndex[id] = c
}
-
- log.Debug("clients: added %q: ID:%q [%d]", c.Name, c.IDs, len(clients.list))
-
- return true, nil
}
// Del removes a client. ok is false if there is no such client.
@@ -645,86 +649,53 @@ func (clients *clientsContainer) Del(name string) (ok bool) {
log.Error("client container: removing client %s: %s", name, err)
}
+ clients.del(c)
+
+ return true
+}
+
+// del removes c from the indexes. clients.lock is expected to be locked.
+func (clients *clientsContainer) del(c *Client) {
// update Name index
- delete(clients.list, name)
+ delete(clients.list, c.Name)
// update ID index
for _, id := range c.IDs {
delete(clients.idIndex, id)
}
-
- return true
}
// Update updates a client by its name.
-func (clients *clientsContainer) Update(name string, c *Client) (err error) {
+func (clients *clientsContainer) Update(prev, c *Client) (err error) {
err = clients.check(c)
if err != nil {
+ // Don't wrap the error since it's informative enough as is.
return err
}
clients.lock.Lock()
defer clients.lock.Unlock()
- prev, ok := clients.list[name]
- if !ok {
- return errors.Error("client not found")
- }
-
- // First, check the name index.
+ // Check the name index.
if prev.Name != c.Name {
- _, ok = clients.list[c.Name]
+ _, ok := clients.list[c.Name]
if ok {
return errors.Error("client already exists")
}
}
- // Second, update the ID index.
- err = clients.updateIDIndex(prev, c.IDs)
- if err != nil {
- // Don't wrap the error, because it's informative enough as is.
- return err
- }
-
- // Update name index.
- if prev.Name != c.Name {
- delete(clients.list, prev.Name)
- clients.list[c.Name] = prev
- }
-
- // Update upstreams cache.
- err = c.closeUpstreams()
- if err != nil {
- return err
- }
-
- *prev = *c
-
- return nil
-}
-
-// updateIDIndex updates the ID index data for cli using the information from
-// newIDs.
-func (clients *clientsContainer) updateIDIndex(cli *Client, newIDs []string) (err error) {
- if slices.Equal(cli.IDs, newIDs) {
- return nil
- }
-
- for _, id := range newIDs {
- existing, ok := clients.idIndex[id]
- if ok && existing != cli {
- return fmt.Errorf("id %q is used by client with name %q", id, existing.Name)
+ // Check the ID index.
+ if !slices.Equal(prev.IDs, c.IDs) {
+ for _, id := range c.IDs {
+ existing, ok := clients.idIndex[id]
+ if ok && existing != prev {
+ return fmt.Errorf("id %q is used by client with name %q", id, existing.Name)
+ }
}
}
- // Update the IDs in the index.
- for _, id := range cli.IDs {
- delete(clients.idIndex, id)
- }
-
- for _, id := range newIDs {
- clients.idIndex[id] = cli
- }
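+	// Update the name and ID indexes.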
+ clients.del(prev)
+ clients.add(c)
return nil
}
diff --git a/internal/home/clients_test.go b/internal/home/clients_test.go
index ebf879ef..8361528a 100644
--- a/internal/home/clients_test.go
+++ b/internal/home/clients_test.go
@@ -98,22 +98,8 @@ func TestClients(t *testing.T) {
assert.False(t, ok)
})
- t.Run("update_fail_name", func(t *testing.T) {
- err := clients.Update("client3", &Client{
- IDs: []string{"1.2.3.0"},
- Name: "client3",
- })
- require.Error(t, err)
-
- err = clients.Update("client3", &Client{
- IDs: []string{"1.2.3.0"},
- Name: "client2",
- })
- assert.Error(t, err)
- })
-
t.Run("update_fail_ip", func(t *testing.T) {
- err := clients.Update("client1", &Client{
+ err := clients.Update(&Client{Name: "client1"}, &Client{
IDs: []string{"2.2.2.2"},
Name: "client1",
})
@@ -129,7 +115,10 @@ func TestClients(t *testing.T) {
cliNewIP = netip.MustParseAddr(cliNew)
)
- err := clients.Update("client1", &Client{
+ prev, ok := clients.list["client1"]
+ require.True(t, ok)
+
+ err := clients.Update(prev, &Client{
IDs: []string{cliNew},
Name: "client1",
})
@@ -138,7 +127,10 @@ func TestClients(t *testing.T) {
assert.Equal(t, clients.clientSource(cliOldIP), ClientSourceNone)
assert.Equal(t, clients.clientSource(cliNewIP), ClientSourcePersistent)
- err = clients.Update("client1", &Client{
+ prev, ok = clients.list["client1"]
+ require.True(t, ok)
+
+ err = clients.Update(prev, &Client{
IDs: []string{cliNew},
Name: "client1-renamed",
UseOwnSettings: true,
diff --git a/internal/home/clientshttp.go b/internal/home/clientshttp.go
index 82a16713..6425f941 100644
--- a/internal/home/clientshttp.go
+++ b/internal/home/clientshttp.go
@@ -289,7 +289,7 @@ func (clients *clientsContainer) handleUpdateClient(w http.ResponseWriter, r *ht
return
}
- err = clients.Update(dj.Name, c)
+ err = clients.Update(prev, c)
if err != nil {
aghhttp.Error(r, w, http.StatusBadRequest, "%s", err)
diff --git a/internal/home/config.go b/internal/home/config.go
index c97e671c..8d9fa422 100644
--- a/internal/home/config.go
+++ b/internal/home/config.go
@@ -399,19 +399,39 @@ func (c *configuration) getConfigFilename() string {
return configFile
}
-// getLogSettings reads logging settings from the config file.
-// we do it in a separate method in order to configure logger before the actual configuration is parsed and applied.
-func getLogSettings() logSettings {
- l := logSettings{}
+// readLogSettings reads logging settings from the config file. We do it in a
+// separate method in order to configure logger before the actual configuration
+// is parsed and applied.
+func readLogSettings() (ls *logSettings) {
+ ls = &logSettings{}
+
yamlFile, err := readConfigFile()
if err != nil {
- return l
+ return ls
}
- err = yaml.Unmarshal(yamlFile, &l)
+
+ err = yaml.Unmarshal(yamlFile, ls)
if err != nil {
log.Error("Couldn't get logging settings from the configuration: %s", err)
}
- return l
+
+ return ls
+}
+
+// validateBindHosts returns an error if any of the binding hosts in the
+// configuration is not a valid IP address.
+func validateBindHosts(conf *configuration) (err error) {
+ if !conf.BindHost.IsValid() {
+ return errors.Error("bind_host is not a valid ip address")
+ }
+
+ for i, addr := range conf.DNS.BindHosts {
+ if !addr.IsValid() {
+ return fmt.Errorf("dns.bind_hosts at index %d is not a valid ip address", i)
+ }
+ }
+
+ return nil
}
// parseConfig loads configuration from the YAML file
@@ -425,6 +445,13 @@ func parseConfig() (err error) {
config.fileData = nil
err = yaml.Unmarshal(fileData, &config)
if err != nil {
+ // Don't wrap the error since it's informative enough as is.
+ return err
+ }
+
+ err = validateBindHosts(config)
+ if err != nil {
+ // Don't wrap the error since it's informative enough as is.
return err
}
diff --git a/internal/home/control.go b/internal/home/control.go
index 9c48d5bc..ae83507c 100644
--- a/internal/home/control.go
+++ b/internal/home/control.go
@@ -180,7 +180,7 @@ func registerControlHandlers() {
httpRegister(http.MethodGet, "/control/status", handleStatus)
httpRegister(http.MethodPost, "/control/i18n/change_language", handleI18nChangeLanguage)
httpRegister(http.MethodGet, "/control/i18n/current_language", handleI18nCurrentLanguage)
- Context.mux.HandleFunc("/control/version.json", postInstall(optionalAuth(handleGetVersionJSON)))
+ Context.mux.HandleFunc("/control/version.json", postInstall(optionalAuth(handleVersionJSON)))
httpRegister(http.MethodPost, "/control/update", handleUpdate)
httpRegister(http.MethodGet, "/control/profile", handleGetProfile)
httpRegister(http.MethodPut, "/control/profile/update", handlePutProfile)
diff --git a/internal/home/controlupdate.go b/internal/home/controlupdate.go
index 1cea1d14..f7e56208 100644
--- a/internal/home/controlupdate.go
+++ b/internal/home/controlupdate.go
@@ -26,15 +26,14 @@ type temporaryError interface {
Temporary() (ok bool)
}
-// Get the latest available version from the Internet
-func handleGetVersionJSON(w http.ResponseWriter, r *http.Request) {
+// handleVersionJSON is the handler for the POST /control/version.json HTTP API.
+//
+// TODO(a.garipov): Find out if this API is used with a GET method by anyone.
+func handleVersionJSON(w http.ResponseWriter, r *http.Request) {
resp := &versionResponse{}
if Context.disableUpdate {
resp.Disabled = true
- err := json.NewEncoder(w).Encode(resp)
- if err != nil {
- aghhttp.Error(r, w, http.StatusInternalServerError, "writing body: %s", err)
- }
+ _ = aghhttp.WriteJSONResponse(w, r, resp)
return
}
diff --git a/internal/home/home.go b/internal/home/home.go
index 443bdc0f..5f1dd6f2 100644
--- a/internal/home/home.go
+++ b/internal/home/home.go
@@ -27,14 +27,17 @@ import (
"github.com/AdguardTeam/AdGuardHome/internal/dhcpd"
"github.com/AdguardTeam/AdGuardHome/internal/dnsforward"
"github.com/AdguardTeam/AdGuardHome/internal/filtering"
+ "github.com/AdguardTeam/AdGuardHome/internal/filtering/hashprefix"
"github.com/AdguardTeam/AdGuardHome/internal/filtering/safesearch"
"github.com/AdguardTeam/AdGuardHome/internal/querylog"
"github.com/AdguardTeam/AdGuardHome/internal/stats"
"github.com/AdguardTeam/AdGuardHome/internal/updater"
"github.com/AdguardTeam/AdGuardHome/internal/version"
+ "github.com/AdguardTeam/dnsproxy/upstream"
"github.com/AdguardTeam/golibs/errors"
"github.com/AdguardTeam/golibs/log"
"github.com/AdguardTeam/golibs/netutil"
+ "github.com/AdguardTeam/golibs/stringutil"
"golang.org/x/exp/slices"
"gopkg.in/natefinch/lumberjack.v2"
)
@@ -143,7 +146,9 @@ func Main(clientBuildFS fs.FS) {
run(opts, clientBuildFS)
}
-func setupContext(opts options) {
+// setupContext initializes [Context] fields. It also reads and upgrades the
+// config file if necessary.
+func setupContext(opts options) (err error) {
setupContextFlags(opts)
Context.tlsRoots = aghtls.SystemRootCAs()
@@ -160,10 +165,15 @@ func setupContext(opts options) {
},
}
+ Context.mux = http.NewServeMux()
+
if !Context.firstRun {
// Do the upgrade if necessary.
- err := upgradeConfig()
- fatalOnError(err)
+ err = upgradeConfig()
+ if err != nil {
+ // Don't wrap the error, because it's informative enough as is.
+ return err
+ }
if err = parseConfig(); err != nil {
log.Error("parsing configuration file: %s", err)
@@ -179,11 +189,14 @@ func setupContext(opts options) {
if !opts.noEtcHosts && config.Clients.Sources.HostsFile {
err = setupHostsContainer()
- fatalOnError(err)
+ if err != nil {
+ // Don't wrap the error, because it's informative enough as is.
+ return err
+ }
}
}
- Context.mux = http.NewServeMux()
+ return nil
}
// setupContextFlags sets global flags and prints their status to the log.
@@ -285,25 +298,27 @@ func setupHostsContainer() (err error) {
return nil
}
-func setupConfig(opts options) (err error) {
- config.DNS.DnsfilterConf.EtcHosts = Context.etcHosts
- config.DNS.DnsfilterConf.ConfigModified = onConfigModified
- config.DNS.DnsfilterConf.HTTPRegister = httpRegister
- config.DNS.DnsfilterConf.DataDir = Context.getDataDir()
- config.DNS.DnsfilterConf.Filters = slices.Clone(config.Filters)
- config.DNS.DnsfilterConf.WhitelistFilters = slices.Clone(config.WhitelistFilters)
- config.DNS.DnsfilterConf.UserRules = slices.Clone(config.UserRules)
- config.DNS.DnsfilterConf.HTTPClient = Context.client
-
- config.DNS.DnsfilterConf.SafeSearchConf.CustomResolver = safeSearchResolver{}
- config.DNS.DnsfilterConf.SafeSearch, err = safesearch.NewDefault(
- config.DNS.DnsfilterConf.SafeSearchConf,
- "default",
- config.DNS.DnsfilterConf.SafeSearchCacheSize,
- time.Minute*time.Duration(config.DNS.DnsfilterConf.CacheTime),
- )
+// setupOpts sets up command-line options.
+func setupOpts(opts options) (err error) {
+ err = setupBindOpts(opts)
if err != nil {
- return fmt.Errorf("initializing safesearch: %w", err)
+ // Don't wrap the error, because it's informative enough as is.
+ return err
+ }
+
+ if len(opts.pidFile) != 0 && writePIDFile(opts.pidFile) {
+ Context.pidFileName = opts.pidFile
+ }
+
+ return nil
+}
+
+// initContextClients initializes Context clients and related fields.
+func initContextClients() (err error) {
+ err = setupDNSFilteringConf(config.DNS.DnsfilterConf)
+ if err != nil {
+ // Don't wrap the error, because it's informative enough as is.
+ return err
}
//lint:ignore SA1019 Migration is not over.
@@ -338,8 +353,19 @@ func setupConfig(opts options) (err error) {
arpdb = aghnet.NewARPDB()
}
- Context.clients.Init(config.Clients.Persistent, Context.dhcpServer, Context.etcHosts, arpdb, config.DNS.DnsfilterConf)
+ Context.clients.Init(
+ config.Clients.Persistent,
+ Context.dhcpServer,
+ Context.etcHosts,
+ arpdb,
+ config.DNS.DnsfilterConf,
+ )
+ return nil
+}
+
+// setupBindOpts overrides bind host/port from the opts.
+func setupBindOpts(opts options) (err error) {
if opts.bindPort != 0 {
config.BindPort = opts.bindPort
@@ -350,12 +376,83 @@ func setupConfig(opts options) (err error) {
}
}
- // override bind host/port from the console
if opts.bindHost.IsValid() {
config.BindHost = opts.bindHost
}
- if len(opts.pidFile) != 0 && writePIDFile(opts.pidFile) {
- Context.pidFileName = opts.pidFile
+
+ return nil
+}
+
+// setupDNSFilteringConf sets up DNS filtering configuration settings.
+func setupDNSFilteringConf(conf *filtering.Config) (err error) {
+ const (
+ dnsTimeout = 3 * time.Second
+
+ sbService = "safe browsing"
+ defaultSafeBrowsingServer = `https://family.adguard-dns.com/dns-query`
+ sbTXTSuffix = `sb.dns.adguard.com.`
+
+ pcService = "parental control"
+ defaultParentalServer = `https://family.adguard-dns.com/dns-query`
+ pcTXTSuffix = `pc.dns.adguard.com.`
+ )
+
+ conf.EtcHosts = Context.etcHosts
+ conf.ConfigModified = onConfigModified
+ conf.HTTPRegister = httpRegister
+ conf.DataDir = Context.getDataDir()
+ conf.Filters = slices.Clone(config.Filters)
+ conf.WhitelistFilters = slices.Clone(config.WhitelistFilters)
+ conf.UserRules = slices.Clone(config.UserRules)
+ conf.HTTPClient = Context.client
+
+ cacheTime := time.Duration(conf.CacheTime) * time.Minute
+
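+	// The server addresses are pre-resolved to avoid a bootstrap lookup for
+	// family.adguard-dns.com.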
+ upsOpts := &upstream.Options{
+ Timeout: dnsTimeout,
+ ServerIPAddrs: []net.IP{
+ {94, 140, 14, 15},
+ {94, 140, 15, 16},
+ net.ParseIP("2a10:50c0::bad1:ff"),
+ net.ParseIP("2a10:50c0::bad2:ff"),
+ },
+ }
+
+ sbUps, err := upstream.AddressToUpstream(defaultSafeBrowsingServer, upsOpts)
+ if err != nil {
+ return fmt.Errorf("converting safe browsing server: %w", err)
+ }
+
+ conf.SafeBrowsingChecker = hashprefix.New(&hashprefix.Config{
+ Upstream: sbUps,
+ ServiceName: sbService,
+ TXTSuffix: sbTXTSuffix,
+ CacheTime: cacheTime,
+ CacheSize: conf.SafeBrowsingCacheSize,
+ })
+
+ parUps, err := upstream.AddressToUpstream(defaultParentalServer, upsOpts)
+ if err != nil {
+ return fmt.Errorf("converting parental server: %w", err)
+ }
+
+ conf.ParentalControlChecker = hashprefix.New(&hashprefix.Config{
+ Upstream: parUps,
+ ServiceName: pcService,
+ TXTSuffix: pcTXTSuffix,
+ CacheTime: cacheTime,
+ CacheSize: conf.SafeBrowsingCacheSize,
+ })
+
+ conf.SafeSearchConf.CustomResolver = safeSearchResolver{}
+ conf.SafeSearch, err = safesearch.NewDefault(
+ conf.SafeSearchConf,
+ "default",
+ conf.SafeSearchCacheSize,
+ cacheTime,
+ )
+ if err != nil {
+ return fmt.Errorf("initializing safesearch: %w", err)
}
return nil
@@ -432,14 +529,16 @@ func fatalOnError(err error) {
// run configures and starts AdGuard Home.
func run(opts options, clientBuildFS fs.FS) {
- // configure config filename
+ // Configure config filename.
initConfigFilename(opts)
- // configure working dir and config path
- initWorkingDir(opts)
+ // Configure working dir and config path.
+ err := initWorkingDir(opts)
+ fatalOnError(err)
- // configure log level and output
- configureLogger(opts)
+ // Configure log level and output.
+ err = configureLogger(opts)
+ fatalOnError(err)
// Print the first message after logger is configured.
log.Info(version.Full())
@@ -448,25 +547,29 @@ func run(opts options, clientBuildFS fs.FS) {
log.Info("AdGuard Home is running as a service")
}
- setupContext(opts)
-
- err := configureOS(config)
+ err = setupContext(opts)
fatalOnError(err)
- // clients package uses filtering package's static data (filtering.BlockedSvcKnown()),
- // so we have to initialize filtering's static data first,
- // but also avoid relying on automatic Go init() function
+ err = configureOS(config)
+ fatalOnError(err)
+
+	// The clients package uses the filtering package's static data
+	// (filtering.BlockedSvcKnown()), so the filtering static data must be
+	// initialized first, while also avoiding reliance on the automatic Go
+	// init() function.
filtering.InitModule()
- err = setupConfig(opts)
+ err = initContextClients()
fatalOnError(err)
- // TODO(e.burkov): This could be made earlier, probably as the option's
+ err = setupOpts(opts)
+ fatalOnError(err)
+
+ // TODO(e.burkov): This could be made earlier, probably as the option's
// effect.
cmdlineUpdate(opts)
if !Context.firstRun {
- // Save the updated config
+ // Save the updated config.
err = config.write()
fatalOnError(err)
@@ -476,33 +579,15 @@ func run(opts options, clientBuildFS fs.FS) {
}
}
- err = os.MkdirAll(Context.getDataDir(), 0o755)
- if err != nil {
- log.Fatalf("Cannot create DNS data dir at %s: %s", Context.getDataDir(), err)
- }
+ dir := Context.getDataDir()
+ err = os.MkdirAll(dir, 0o755)
+ fatalOnError(errors.Annotate(err, "creating DNS data dir at %s: %w", dir))
- sessFilename := filepath.Join(Context.getDataDir(), "sessions.db")
GLMode = opts.glinetMode
- var rateLimiter *authRateLimiter
- if config.AuthAttempts > 0 && config.AuthBlockMin > 0 {
- rateLimiter = newAuthRateLimiter(
- time.Duration(config.AuthBlockMin)*time.Minute,
- config.AuthAttempts,
- )
- } else {
- log.Info("authratelimiter is disabled")
- }
- Context.auth = InitAuth(
- sessFilename,
- config.Users,
- config.WebSessionTTLHours*60*60,
- rateLimiter,
- )
- if Context.auth == nil {
- log.Fatalf("Couldn't initialize Auth module")
- }
- config.Users = nil
+ // Init auth module.
+ Context.auth, err = initUsers()
+ fatalOnError(err)
Context.tls, err = newTLSManager(config.TLS)
if err != nil {
@@ -520,10 +605,10 @@ func run(opts options, clientBuildFS fs.FS) {
Context.tls.start()
go func() {
- serr := startDNSServer()
- if serr != nil {
+ sErr := startDNSServer()
+ if sErr != nil {
closeDNSServer()
- fatalOnError(serr)
+ fatalOnError(sErr)
}
}()
@@ -537,10 +622,33 @@ func run(opts options, clientBuildFS fs.FS) {
Context.web.start()
- // wait indefinitely for other go-routines to complete their job
+ // Wait indefinitely for other goroutines to complete their job.
select {}
}
+// initUsers initializes the context auth module. It also clears the config
+// users field.
+func initUsers() (auth *Auth, err error) {
+ sessFilename := filepath.Join(Context.getDataDir(), "sessions.db")
+
+ var rateLimiter *authRateLimiter
+ if config.AuthAttempts > 0 && config.AuthBlockMin > 0 {
+ blockDur := time.Duration(config.AuthBlockMin) * time.Minute
+ rateLimiter = newAuthRateLimiter(blockDur, config.AuthAttempts)
+ } else {
+ log.Info("authratelimiter is disabled")
+ }
+
+ sessionTTL := config.WebSessionTTLHours * 60 * 60
+ auth = InitAuth(sessFilename, config.Users, sessionTTL, rateLimiter)
+ if auth == nil {
+ return nil, errors.Error("initializing auth module failed")
+ }
+
+ config.Users = nil
+
+ return auth, nil
+}
+
func (c *configuration) anonymizer() (ipmut *aghnet.IPMut) {
var anonFunc aghnet.IPMutFunc
if c.DNS.AnonymizeClientIP {
@@ -613,22 +721,19 @@ func writePIDFile(fn string) bool {
return true
}
+// initConfigFilename sets up the context config file path. The path can be
+// overridden by command-line arguments; otherwise, the default is used.
func initConfigFilename(opts options) {
- // config file path can be overridden by command-line arguments:
- if opts.confFilename != "" {
- Context.configFilename = opts.confFilename
- } else {
- // Default config file name
- Context.configFilename = "AdGuardHome.yaml"
- }
+ Context.configFilename = stringutil.Coalesce(opts.confFilename, "AdGuardHome.yaml")
}
-// initWorkingDir initializes the workDir
-// if no command-line arguments specified, we use the directory where our binary file is located
-func initWorkingDir(opts options) {
+// initWorkingDir initializes the workDir. If no command-line arguments are
+// specified, the directory with the binary file is used.
+func initWorkingDir(opts options) (err error) {
execPath, err := os.Executable()
if err != nil {
- panic(err)
+ // Don't wrap the error, because it's informative enough as is.
+ return err
}
if opts.workDir != "" {
@@ -640,34 +745,20 @@ func initWorkingDir(opts options) {
workDir, err := filepath.EvalSymlinks(Context.workDir)
if err != nil {
- panic(err)
+ // Don't wrap the error, because it's informative enough as is.
+ return err
}
Context.workDir = workDir
+
+ return nil
}
-// configureLogger configures logger level and output
-func configureLogger(opts options) {
- ls := getLogSettings()
+// configureLogger configures logger level and output.
+func configureLogger(opts options) (err error) {
+ ls := getLogSettings(opts)
- // command-line arguments can override config settings
- if opts.verbose || config.Verbose {
- ls.Verbose = true
- }
- if opts.logFile != "" {
- ls.File = opts.logFile
- } else if config.File != "" {
- ls.File = config.File
- }
-
- // Handle default log settings overrides
- ls.Compress = config.Compress
- ls.LocalTime = config.LocalTime
- ls.MaxBackups = config.MaxBackups
- ls.MaxSize = config.MaxSize
- ls.MaxAge = config.MaxAge
-
- // log.SetLevel(log.INFO) - default
+ // Configure logger level.
if ls.Verbose {
log.SetLevel(log.DEBUG)
}
@@ -676,38 +767,63 @@ func configureLogger(opts options) {
// happen pretty quickly.
log.SetFlags(log.LstdFlags | log.Lmicroseconds)
- if opts.runningAsService && ls.File == "" && runtime.GOOS == "windows" {
- // When running as a Windows service, use eventlog by default if nothing
- // else is configured. Otherwise, we'll simply lose the log output.
- ls.File = configSyslog
- }
-
- // logs are written to stdout (default)
+ // Write logs to stdout by default.
if ls.File == "" {
- return
+ return nil
}
if ls.File == configSyslog {
- // Use syslog where it is possible and eventlog on Windows
- err := aghos.ConfigureSyslog(serviceName)
+ // Use syslog where it is possible and eventlog on Windows.
+ err = aghos.ConfigureSyslog(serviceName)
if err != nil {
- log.Fatalf("cannot initialize syslog: %s", err)
- }
- } else {
- logFilePath := ls.File
- if !filepath.IsAbs(logFilePath) {
- logFilePath = filepath.Join(Context.workDir, logFilePath)
+ return fmt.Errorf("cannot initialize syslog: %w", err)
}
- log.SetOutput(&lumberjack.Logger{
- Filename: logFilePath,
- Compress: ls.Compress, // disabled by default
- LocalTime: ls.LocalTime,
- MaxBackups: ls.MaxBackups,
- MaxSize: ls.MaxSize, // megabytes
- MaxAge: ls.MaxAge, // days
- })
+ return nil
}
+
+ logFilePath := ls.File
+ if !filepath.IsAbs(logFilePath) {
+ logFilePath = filepath.Join(Context.workDir, logFilePath)
+ }
+
+ log.SetOutput(&lumberjack.Logger{
+ Filename: logFilePath,
+ Compress: ls.Compress,
+ LocalTime: ls.LocalTime,
+ MaxBackups: ls.MaxBackups,
+ MaxSize: ls.MaxSize,
+ MaxAge: ls.MaxAge,
+ })
+
+ return nil
+}
+
+// getLogSettings returns the log settings read from the config file and
+// overridden by opts and config where applicable.
+func getLogSettings(opts options) (ls *logSettings) {
+ ls = readLogSettings()
+
+ // Command-line arguments can override config settings.
+ if opts.verbose || config.Verbose {
+ ls.Verbose = true
+ }
+
+ ls.File = stringutil.Coalesce(opts.logFile, config.File, ls.File)
+
+ // Handle default log settings overrides.
+ ls.Compress = config.Compress
+ ls.LocalTime = config.LocalTime
+ ls.MaxBackups = config.MaxBackups
+ ls.MaxSize = config.MaxSize
+ ls.MaxAge = config.MaxAge
+
+ if opts.runningAsService && ls.File == "" && runtime.GOOS == "windows" {
+ // When running as a Windows service, use eventlog by default if
+ // nothing else is configured. Otherwise, we'll lose the log output.
+ ls.File = configSyslog
+ }
+
+ return ls
}
// cleanup stops and resets all the modules.
diff --git a/internal/home/service.go b/internal/home/service.go
index c0fe845f..3ec44138 100644
--- a/internal/home/service.go
+++ b/internal/home/service.go
@@ -4,7 +4,6 @@ import (
"fmt"
"io/fs"
"os"
- "path/filepath"
"runtime"
"strconv"
"strings"
@@ -84,14 +83,9 @@ func svcStatus(s service.Service) (status service.Status, err error) {
// On OpenWrt, the service utility may not exist. We use our service script
// directly in this case.
func svcAction(s service.Service, action string) (err error) {
- if runtime.GOOS == "darwin" && action == "start" {
- var exe string
- if exe, err = os.Executable(); err != nil {
- log.Error("starting service: getting executable path: %s", err)
- } else if exe, err = filepath.EvalSymlinks(exe); err != nil {
- log.Error("starting service: evaluating executable symlinks: %s", err)
- } else if !strings.HasPrefix(exe, "/Applications/") {
- log.Info("warning: service must be started from within the /Applications directory")
+ if action == "start" {
+ if err = aghos.PreCheckActionStart(); err != nil {
+ log.Error("starting service: %s", err)
}
}
@@ -99,8 +93,6 @@ func svcAction(s service.Service, action string) (err error) {
if err != nil && service.Platform() == "unix-systemv" &&
(action == "start" || action == "stop" || action == "restart") {
_, err = runInitdCommand(action)
-
- return err
}
return err
@@ -224,6 +216,7 @@ func handleServiceControlAction(opts options, clientBuildFS fs.FS) {
runOpts := opts
runOpts.serviceControlAction = "run"
+
svcConfig := &service.Config{
Name: serviceName,
DisplayName: serviceDisplayName,
@@ -233,35 +226,48 @@ func handleServiceControlAction(opts options, clientBuildFS fs.FS) {
}
configureService(svcConfig)
- prg := &program{
- clientBuildFS: clientBuildFS,
- opts: runOpts,
- }
- var s service.Service
- if s, err = service.New(prg, svcConfig); err != nil {
+ s, err := service.New(&program{clientBuildFS: clientBuildFS, opts: runOpts}, svcConfig)
+ if err != nil {
log.Fatalf("service: initializing service: %s", err)
}
+ err = handleServiceCommand(s, action, opts)
+ if err != nil {
+ log.Fatalf("service: %s", err)
+ }
+
+ log.Printf(
+ "service: action %s has been done successfully on %s",
+ action,
+ service.ChosenSystem(),
+ )
+}
+
+// handleServiceCommand handles the given service command.
+func handleServiceCommand(s service.Service, action string, opts options) (err error) {
switch action {
case "status":
handleServiceStatusCommand(s)
case "run":
if err = s.Run(); err != nil {
- log.Fatalf("service: failed to run service: %s", err)
+ return fmt.Errorf("failed to run service: %w", err)
}
case "install":
initConfigFilename(opts)
- initWorkingDir(opts)
+ if err = initWorkingDir(opts); err != nil {
+ return fmt.Errorf("failed to init working dir: %w", err)
+ }
+
handleServiceInstallCommand(s)
case "uninstall":
handleServiceUninstallCommand(s)
default:
if err = svcAction(s, action); err != nil {
- log.Fatalf("service: executing action %q: %s", action, err)
+ return fmt.Errorf("executing action %q: %w", action, err)
}
}
- log.Printf("service: action %s has been done successfully on %s", action, service.ChosenSystem())
+ return nil
}
// handleServiceStatusCommand handles service "status" command.
diff --git a/internal/home/tls.go b/internal/home/tls.go
index b9b04eeb..84af6eae 100644
--- a/internal/home/tls.go
+++ b/internal/home/tls.go
@@ -172,9 +172,32 @@ func loadTLSConf(tlsConf *tlsConfigSettings, status *tlsConfigStatus) (err error
}
}()
- tlsConf.CertificateChainData = []byte(tlsConf.CertificateChain)
- tlsConf.PrivateKeyData = []byte(tlsConf.PrivateKey)
+ err = loadCertificateChainData(tlsConf, status)
+ if err != nil {
+ // Don't wrap the error, because it's informative enough as is.
+ return err
+ }
+ err = loadPrivateKeyData(tlsConf, status)
+ if err != nil {
+ // Don't wrap the error, because it's informative enough as is.
+ return err
+ }
+
+ err = validateCertificates(
+ status,
+ tlsConf.CertificateChainData,
+ tlsConf.PrivateKeyData,
+ tlsConf.ServerName,
+ )
+
+ return errors.Annotate(err, "validating certificate pair: %w")
+}
+
+// loadCertificateChainData loads the PEM-encoded certificate chain data into
+// the TLS configuration.
+func loadCertificateChainData(tlsConf *tlsConfigSettings, status *tlsConfigStatus) (err error) {
+ tlsConf.CertificateChainData = []byte(tlsConf.CertificateChain)
if tlsConf.CertificatePath != "" {
if tlsConf.CertificateChain != "" {
return errors.Error("certificate data and file can't be set together")
@@ -190,6 +213,13 @@ func loadTLSConf(tlsConf *tlsConfigSettings, status *tlsConfigStatus) (err error
status.ValidCert = true
}
+ return nil
+}
+
+// loadPrivateKeyData loads the PEM-encoded private key data into the TLS
+// configuration.
+func loadPrivateKeyData(tlsConf *tlsConfigSettings, status *tlsConfigStatus) (err error) {
+ tlsConf.PrivateKeyData = []byte(tlsConf.PrivateKey)
if tlsConf.PrivateKeyPath != "" {
if tlsConf.PrivateKey != "" {
return errors.Error("private key data and file can't be set together")
@@ -203,16 +233,6 @@ func loadTLSConf(tlsConf *tlsConfigSettings, status *tlsConfigStatus) (err error
status.ValidKey = true
}
- err = validateCertificates(
- status,
- tlsConf.CertificateChainData,
- tlsConf.PrivateKeyData,
- tlsConf.ServerName,
- )
- if err != nil {
- return fmt.Errorf("validating certificate pair: %w", err)
- }
-
return nil
}
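
Reviewer note, not part of the patch: the split loaders above preserve the original rule that inline PEM data and a file path are mutually exclusive. Below is a minimal standalone sketch of that pattern; pemSource and its fields are illustrative names, not the actual AdGuard Home types.

package main

import (
	"errors"
	"fmt"
	"os"
)

// pemSource holds either inline PEM data or a path to a PEM file.
type pemSource struct {
	Data []byte
	Path string
}

// load returns the PEM bytes, enforcing that inline data and a file path are
// not set together, mirroring the checks in loadCertificateChainData and
// loadPrivateKeyData.
func (s *pemSource) load() (pem []byte, err error) {
	if s.Path == "" {
		return s.Data, nil
	}

	if len(s.Data) != 0 {
		return nil, errors.New("pem data and file can't be set together")
	}

	return os.ReadFile(s.Path)
}

func main() {
	src := &pemSource{Data: []byte("-----BEGIN CERTIFICATE-----\n...")}
	pem, err := src.load()
	if err != nil {
		fmt.Println("error:", err)

		return
	}

	fmt.Printf("loaded %d bytes of PEM data\n", len(pem))
}
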
diff --git a/internal/home/upgrade.go b/internal/home/upgrade.go
index a97386a7..e429eb41 100644
--- a/internal/home/upgrade.go
+++ b/internal/home/upgrade.go
@@ -41,7 +41,8 @@ func upgradeConfig() error {
err = yaml.Unmarshal(body, &diskConf)
if err != nil {
- log.Printf("Couldn't parse config file: %s", err)
+ log.Printf("parsing config file for upgrade: %s", err)
+
return err
}
@@ -293,71 +294,61 @@ func upgradeSchema4to5(diskConf yobj) error {
return nil
}
-// clients:
-// ...
+// upgradeSchema5to6 performs the following changes:
//
-// ip: 127.0.0.1
-// mac: ...
+// # BEFORE:
+// 'clients':
+// ...
+// 'ip': 127.0.0.1
+// 'mac': ...
//
-// ->
-//
-// clients:
-// ...
-//
-// ids:
-// - 127.0.0.1
-// - ...
+// # AFTER:
+// 'clients':
+// ...
+// 'ids':
+// - 127.0.0.1
+// - ...
func upgradeSchema5to6(diskConf yobj) error {
- log.Printf("%s(): called", funcName())
-
+ log.Printf("Upgrade yaml: 5 to 6")
diskConf["schema_version"] = 6
- clients, ok := diskConf["clients"]
+ clientsVal, ok := diskConf["clients"]
if !ok {
return nil
}
- switch arr := clients.(type) {
- case []any:
- for i := range arr {
- switch c := arr[i].(type) {
- case map[any]any:
- var ipVal any
- ipVal, ok = c["ip"]
- ids := []string{}
- if ok {
- var ip string
- ip, ok = ipVal.(string)
- if !ok {
- log.Fatalf("client.ip is not a string: %v", ipVal)
- return nil
- }
- if len(ip) != 0 {
- ids = append(ids, ip)
- }
- }
+ clients, ok := clientsVal.([]yobj)
+ if !ok {
+ return fmt.Errorf("unexpected type of clients: %T", clientsVal)
+ }
- var macVal any
- macVal, ok = c["mac"]
- if ok {
- var mac string
- mac, ok = macVal.(string)
- if !ok {
- log.Fatalf("client.mac is not a string: %v", macVal)
- return nil
- }
- if len(mac) != 0 {
- ids = append(ids, mac)
- }
- }
+ for i := range clients {
+ c := clients[i]
+ var ids []string
- c["ids"] = ids
- default:
- continue
+ if ipVal, hasIP := c["ip"]; hasIP {
+ var ip string
+ if ip, ok = ipVal.(string); !ok {
+ return fmt.Errorf("client.ip is not a string: %v", ipVal)
+ }
+
+ if ip != "" {
+ ids = append(ids, ip)
}
}
- default:
- return nil
+
+ if macVal, hasMac := c["mac"]; hasMac {
+ var mac string
+ if mac, ok = macVal.(string); !ok {
+ return fmt.Errorf("client.mac is not a string: %v", macVal)
+ }
+
+ if mac != "" {
+ ids = append(ids, mac)
+ }
+ }
+
+ c["ids"] = ids
}
return nil
diff --git a/internal/home/upgrade_test.go b/internal/home/upgrade_test.go
index f4091e84..11820be0 100644
--- a/internal/home/upgrade_test.go
+++ b/internal/home/upgrade_test.go
@@ -68,6 +68,95 @@ func TestUpgradeSchema2to3(t *testing.T) {
assertEqualExcept(t, oldDiskConf, diskConf, excludedEntries, excludedEntries)
}
+func TestUpgradeSchema5to6(t *testing.T) {
+ const newSchemaVer = 6
+
+ testCases := []struct {
+ in yobj
+ want yobj
+ wantErr string
+ name string
+ }{{
+ in: yobj{
+ "clients": []yobj{},
+ },
+ want: yobj{
+ "clients": []yobj{},
+ "schema_version": newSchemaVer,
+ },
+ wantErr: "",
+ name: "no_clients",
+ }, {
+ in: yobj{
+ "clients": []yobj{{"ip": "127.0.0.1"}},
+ },
+ want: yobj{
+ "clients": []yobj{{
+ "ids": []string{"127.0.0.1"},
+ "ip": "127.0.0.1",
+ }},
+ "schema_version": newSchemaVer,
+ },
+ wantErr: "",
+ name: "client_ip",
+ }, {
+ in: yobj{
+ "clients": []yobj{{"mac": "mac"}},
+ },
+ want: yobj{
+ "clients": []yobj{{
+ "ids": []string{"mac"},
+ "mac": "mac",
+ }},
+ "schema_version": newSchemaVer,
+ },
+ wantErr: "",
+ name: "client_mac",
+ }, {
+ in: yobj{
+ "clients": []yobj{{"ip": "127.0.0.1", "mac": "mac"}},
+ },
+ want: yobj{
+ "clients": []yobj{{
+ "ids": []string{"127.0.0.1", "mac"},
+ "ip": "127.0.0.1",
+ "mac": "mac",
+ }},
+ "schema_version": newSchemaVer,
+ },
+ wantErr: "",
+ name: "client_ip_mac",
+ }, {
+ in: yobj{
+ "clients": []yobj{{"ip": 1, "mac": "mac"}},
+ },
+ want: yobj{
+ "clients": []yobj{{"ip": 1, "mac": "mac"}},
+ "schema_version": newSchemaVer,
+ },
+ wantErr: "client.ip is not a string: 1",
+ name: "inv_client_ip",
+ }, {
+ in: yobj{
+ "clients": []yobj{{"ip": "127.0.0.1", "mac": 1}},
+ },
+ want: yobj{
+ "clients": []yobj{{"ip": "127.0.0.1", "mac": 1}},
+ "schema_version": newSchemaVer,
+ },
+ wantErr: "client.mac is not a string: 1",
+ name: "inv_client_mac",
+ }}
+
+ for _, tc := range testCases {
+ t.Run(tc.name, func(t *testing.T) {
+ err := upgradeSchema5to6(tc.in)
+ testutil.AssertErrorMsg(t, tc.wantErr, err)
+ assert.Equal(t, tc.want, tc.in)
+ })
+ }
+}
+
func TestUpgradeSchema7to8(t *testing.T) {
const host = "1.2.3.4"
oldConf := yobj{
diff --git a/internal/querylog/decode.go b/internal/querylog/decode.go
index 4e868b54..af9a7ca4 100644
--- a/internal/querylog/decode.go
+++ b/internal/querylog/decode.go
@@ -3,19 +3,24 @@ package querylog
import (
"encoding/base64"
"encoding/json"
+ "fmt"
"io"
"net"
"strings"
"time"
"github.com/AdguardTeam/AdGuardHome/internal/filtering"
+ "github.com/AdguardTeam/golibs/errors"
"github.com/AdguardTeam/golibs/log"
"github.com/AdguardTeam/urlfilter/rules"
"github.com/miekg/dns"
)
+// logEntryHandler represents a handler for decoding a JSON token into the
+// logEntry struct.
type logEntryHandler func(t json.Token, ent *logEntry) error
+// logEntryHandlers is the map of log entry decode handlers for various keys.
var logEntryHandlers = map[string]logEntryHandler{
"CID": func(t json.Token, ent *logEntry) error {
v, ok := t.(string)
@@ -166,6 +171,7 @@ var logEntryHandlers = map[string]logEntryHandler{
},
}
+// decodeResultRuleKey decodes the token of "Rules" type to the logEntry struct.
func decodeResultRuleKey(key string, i int, dec *json.Decoder, ent *logEntry) {
var vToken json.Token
switch key {
@@ -189,6 +195,8 @@ func decodeResultRuleKey(key string, i int, dec *json.Decoder, ent *logEntry) {
}
}
+// decodeVTokenAndAddRule decodes the "Rules" token as [filtering.ResultRule]
+// and then adds the decoded object to the slice of result rules.
func decodeVTokenAndAddRule(
key string,
i int,
@@ -213,6 +221,8 @@ func decodeVTokenAndAddRule(
return newRules, vToken
}
+// decodeResultRules parses the dec's tokens into logEntry ent interpreting it
+// as a slice of the result rules.
func decodeResultRules(dec *json.Decoder, ent *logEntry) {
for {
delimToken, err := dec.Token()
@@ -224,48 +234,53 @@ func decodeResultRules(dec *json.Decoder, ent *logEntry) {
return
}
- if d, ok := delimToken.(json.Delim); ok {
- if d != '[' {
- log.Debug("decodeResultRules: unexpected delim %q", d)
+ if d, ok := delimToken.(json.Delim); !ok {
+ return
+ } else if d != '[' {
+ log.Debug("decodeResultRules: unexpected delim %q", d)
+ }
+
+ err = decodeResultRuleToken(dec, ent)
+ if err != nil {
+ if err != io.EOF && !errors.Is(err, ErrEndOfToken) {
+ log.Debug("decodeResultRules err: %s", err)
}
- } else {
+
return
}
+ }
+}
- i := 0
- for {
- var keyToken json.Token
- keyToken, err = dec.Token()
- if err != nil {
- if err != io.EOF {
- log.Debug("decodeResultRules err: %s", err)
- }
-
- return
- }
-
- if d, ok := keyToken.(json.Delim); ok {
- switch d {
- case '}':
- i++
- case ']':
- return
- default:
- // Go on.
- }
-
- continue
- }
-
- key, ok := keyToken.(string)
- if !ok {
- log.Debug("decodeResultRules: keyToken is %T (%[1]v) and not string", keyToken)
-
- return
- }
-
- decodeResultRuleKey(key, i, dec, ent)
+// decodeResultRuleToken decodes the tokens of "Rules" type to the logEntry ent.
+func decodeResultRuleToken(dec *json.Decoder, ent *logEntry) (err error) {
+ i := 0
+ for {
+ var keyToken json.Token
+ keyToken, err = dec.Token()
+ if err != nil {
+ // Don't wrap the error, because it's informative enough as is.
+ return err
}
+
+ if d, ok := keyToken.(json.Delim); ok {
+ switch d {
+ case '}':
+ i++
+ case ']':
+ return ErrEndOfToken
+ default:
+ // Go on.
+ }
+
+ continue
+ }
+
+ key, ok := keyToken.(string)
+ if !ok {
+ return fmt.Errorf("keyToken is %T (%[1]v) and not string", keyToken)
+ }
+
+ decodeResultRuleKey(key, i, dec, ent)
}
}
@@ -322,6 +337,8 @@ func decodeResultReverseHosts(dec *json.Decoder, ent *logEntry) {
}
}
+// decodeResultIPList parses the dec's tokens into logEntry ent interpreting it
+// as the result IP addresses list.
func decodeResultIPList(dec *json.Decoder, ent *logEntry) {
for {
itemToken, err := dec.Token()
@@ -355,6 +372,8 @@ func decodeResultIPList(dec *json.Decoder, ent *logEntry) {
}
}
+// decodeResultDNSRewriteResultKey decodes the token of "DNSRewriteResult" type
+// to the logEntry struct.
func decodeResultDNSRewriteResultKey(key string, dec *json.Decoder, ent *logEntry) {
var err error
@@ -395,50 +414,29 @@ func decodeResultDNSRewriteResultKey(key string, dec *json.Decoder, ent *logEntr
log.Debug("decodeResultDNSRewriteResultKey response err: %s", err)
}
- for rrType, rrValues := range ent.Result.DNSRewriteResult.Response {
- switch rrType {
- case
- dns.TypeA,
- dns.TypeAAAA:
- for i, v := range rrValues {
- s, _ := v.(string)
- rrValues[i] = net.ParseIP(s)
- }
- default:
- // Go on.
- }
- }
+ ent.parseDNSRewriteResultIPs()
default:
// Go on.
}
}
+// decodeResultDNSRewriteResult parses the dec's tokens into logEntry ent
+// interpreting it as the result DNSRewriteResult.
func decodeResultDNSRewriteResult(dec *json.Decoder, ent *logEntry) {
for {
- keyToken, err := dec.Token()
+ key, err := parseKeyToken(dec)
if err != nil {
- if err != io.EOF {
- log.Debug("decodeResultDNSRewriteResult err: %s", err)
+ if err != io.EOF && !errors.Is(err, ErrEndOfToken) {
+ log.Debug("decodeResultDNSRewriteResult: %s", err)
}
return
}
- if d, ok := keyToken.(json.Delim); ok {
- if d == '}' {
- return
- }
-
+ if key == "" {
continue
}
- key, ok := keyToken.(string)
- if !ok {
- log.Debug("decodeResultDNSRewriteResult: keyToken is %T (%[1]v) and not string", keyToken)
-
- return
- }
-
decodeResultDNSRewriteResultKey(key, dec, ent)
}
}
@@ -474,34 +472,51 @@ func translateResult(ent *logEntry) {
res.IPList = nil
}
+// ErrEndOfToken is an error returned by the token parsers when the closing
+// bracket is found.
+const ErrEndOfToken errors.Error = "end of token"
+
+// parseKeyToken parses the dec's token key.
+func parseKeyToken(dec *json.Decoder) (key string, err error) {
+ keyToken, err := dec.Token()
+ if err != nil {
+ return "", err
+ }
+
+ if d, ok := keyToken.(json.Delim); ok {
+ if d == '}' {
+ return "", ErrEndOfToken
+ }
+
+ return "", nil
+ }
+
+ key, ok := keyToken.(string)
+ if !ok {
+ return "", fmt.Errorf("keyToken is %T (%[1]v) and not string", keyToken)
+ }
+
+ return key, nil
+}
+
+// decodeResult decodes a token of "Result" type to the logEntry struct.
func decodeResult(dec *json.Decoder, ent *logEntry) {
defer translateResult(ent)
for {
- keyToken, err := dec.Token()
+ key, err := parseKeyToken(dec)
if err != nil {
- if err != io.EOF {
- log.Debug("decodeResult err: %s", err)
+ if err != io.EOF && !errors.Is(err, ErrEndOfToken) {
+ log.Debug("decodeResult: %s", err)
}
return
}
- if d, ok := keyToken.(json.Delim); ok {
- if d == '}' {
- return
- }
-
+ if key == "" {
continue
}
- key, ok := keyToken.(string)
- if !ok {
- log.Debug("decodeResult: keyToken is %T (%[1]v) and not string", keyToken)
-
- return
- }
-
decHandler, ok := resultDecHandlers[key]
if ok {
decHandler(dec, ent)
@@ -527,13 +542,16 @@ func decodeResult(dec *json.Decoder, ent *logEntry) {
}
}
+// resultHandlers is the map of log entry decode handlers for various keys.
var resultHandlers = map[string]logEntryHandler{
"IsFiltered": func(t json.Token, ent *logEntry) error {
v, ok := t.(bool)
if !ok {
return nil
}
+
ent.Result.IsFiltered = v
+
return nil
},
"Rule": func(t json.Token, ent *logEntry) error {
@@ -578,11 +596,14 @@ var resultHandlers = map[string]logEntryHandler{
if !ok {
return nil
}
+
i, err := v.Int64()
if err != nil {
return err
}
+
ent.Result.Reason = filtering.Reason(i)
+
return nil
},
"ServiceName": func(t json.Token, ent *logEntry) error {
@@ -607,6 +628,7 @@ var resultHandlers = map[string]logEntryHandler{
},
}
+// resultDecHandlers is the map of decode handlers for various keys.
var resultDecHandlers = map[string]func(dec *json.Decoder, ent *logEntry){
"ReverseHosts": decodeResultReverseHosts,
"IPList": decodeResultIPList,
@@ -614,9 +636,11 @@ var resultDecHandlers = map[string]func(dec *json.Decoder, ent *logEntry){
"DNSRewriteResult": decodeResultDNSRewriteResult,
}
+// decodeLogEntry decodes string str to logEntry ent.
func decodeLogEntry(ent *logEntry, str string) {
dec := json.NewDecoder(strings.NewReader(str))
dec.UseNumber()
+
for {
keyToken, err := dec.Token()
if err != nil {
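
Reviewer note, not part of the patch: the new parseKeyToken/ErrEndOfToken flow above uses a typed string constant as a sentinel error, so callers can tell "closing brace reached" apart from real decoding failures via errors.Is. A small self-contained sketch of the same pattern follows; strError stands in for golibs' errors.Error, and nextKey is an illustrative name, not the patch's code.

package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"strings"
)

// strError is a string type that implements error, so constants can be used
// as sentinel errors.
type strError string

// Error implements the error interface for strError.
func (e strError) Error() (msg string) { return string(e) }

// errEndOfObject is returned when the closing '}' delimiter is found.
const errEndOfObject strError = "end of object"

// nextKey returns the next object key, errEndOfObject at the closing brace,
// or an empty key for delimiters that should simply be skipped.
func nextKey(dec *json.Decoder) (key string, err error) {
	tok, err := dec.Token()
	if err != nil {
		return "", err
	}

	if d, ok := tok.(json.Delim); ok {
		if d == '}' {
			return "", errEndOfObject
		}

		return "", nil
	}

	s, ok := tok.(string)
	if !ok {
		return "", fmt.Errorf("token is %T (%[1]v) and not string", tok)
	}

	return s, nil
}

func main() {
	dec := json.NewDecoder(strings.NewReader(`{"IsFiltered":true,"Rule":"||example.org^"}`))
	for {
		key, err := nextKey(dec)
		if err != nil {
			if err != io.EOF && !errors.Is(err, errEndOfObject) {
				fmt.Println("decode error:", err)
			}

			return
		}

		if key == "" {
			// Opening delimiter: just move on.
			continue
		}

		// Consume the value token that follows the key.
		val, _ := dec.Token()
		fmt.Printf("%s = %v\n", key, val)
	}
}
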
diff --git a/internal/querylog/decode_test.go b/internal/querylog/decode_test.go
index aa3de021..8e557e50 100644
--- a/internal/querylog/decode_test.go
+++ b/internal/querylog/decode_test.go
@@ -182,8 +182,7 @@ func TestDecodeLogEntry(t *testing.T) {
if tc.want == "" {
assert.Empty(t, s)
} else {
- assert.True(t, strings.HasSuffix(s, tc.want),
- "got %q", s)
+ assert.True(t, strings.HasSuffix(s, tc.want), "got %q", s)
}
logOutput.Reset()
diff --git a/internal/querylog/entry.go b/internal/querylog/entry.go
index eae99385..c3c800ed 100644
--- a/internal/querylog/entry.go
+++ b/internal/querylog/entry.go
@@ -68,3 +68,19 @@ func (e *logEntry) addResponse(resp *dns.Msg, isOrig bool) {
log.Error("querylog: %s", err)
}
}
+
+// parseDNSRewriteResultIPs fills logEntry's DNSRewriteResult response records
+// with the IP addresses parsed from the raw strings.
+func (e *logEntry) parseDNSRewriteResultIPs() {
+ for rrType, rrValues := range e.Result.DNSRewriteResult.Response {
+ switch rrType {
+ case dns.TypeA, dns.TypeAAAA:
+ for i, v := range rrValues {
+ s, _ := v.(string)
+ rrValues[i] = net.ParseIP(s)
+ }
+ default:
+ // Go on.
+ }
+ }
+}
diff --git a/internal/querylog/qlog.go b/internal/querylog/qlog.go
index 0a283299..4d543587 100644
--- a/internal/querylog/qlog.go
+++ b/internal/querylog/qlog.go
@@ -16,32 +16,35 @@ import (
"github.com/miekg/dns"
)
-const (
- queryLogFileName = "querylog.json" // .gz added during compression
-)
+// queryLogFileName is the name of the log file. The ".gz" extension is
+// added later during compression.
+const queryLogFileName = "querylog.json"
-// queryLog is a structure that writes and reads the DNS query log
+// queryLog is a structure that writes and reads the DNS query log.
type queryLog struct {
- findClient func(ids []string) (c *Client, err error)
-
// confMu protects conf.
confMu *sync.RWMutex
- conf *Config
+
+ conf *Config
+ anonymizer *aghnet.IPMut
+
+ findClient func(ids []string) (c *Client, err error)
// logFile is the path to the log file.
logFile string
- // bufferLock protects buffer.
- bufferLock sync.RWMutex
// buffer contains recent log entries. The entries in this buffer must not
// be modified.
buffer []*logEntry
- fileFlushLock sync.Mutex // synchronize a file-flushing goroutine and main thread
- flushPending bool // don't start another goroutine while the previous one is still running
+ // bufferLock protects buffer.
+ bufferLock sync.RWMutex
+
+ // fileFlushLock synchronizes a file-flushing goroutine and main thread.
+ fileFlushLock sync.Mutex
fileWriteLock sync.Mutex
- anonymizer *aghnet.IPMut
+ flushPending bool
}
// ClientProto values are names of the client protocols.
@@ -155,6 +158,43 @@ func (l *queryLog) clear() {
log.Debug("querylog: cleared")
}
+// newLogEntry creates an instance of logEntry from parameters.
+func newLogEntry(params *AddParams) (entry *logEntry) {
+ q := params.Question.Question[0]
+
+ entry = &logEntry{
+ // TODO(d.kolyshev): Export this timestamp to func params.
+ Time: time.Now(),
+
+ QHost: strings.ToLower(q.Name[:len(q.Name)-1]),
+ QType: dns.Type(q.Qtype).String(),
+ QClass: dns.Class(q.Qclass).String(),
+
+ ClientID: params.ClientID,
+ ClientProto: params.ClientProto,
+
+ Result: *params.Result,
+ Upstream: params.Upstream,
+
+ IP: params.ClientIP,
+
+ Elapsed: params.Elapsed,
+
+ Cached: params.Cached,
+ AuthenticatedData: params.AuthenticatedData,
+ }
+
+ if params.ReqECS != nil {
+ entry.ReqECS = params.ReqECS.String()
+ }
+
+ entry.addResponse(params.Answer, false)
+ entry.addResponse(params.OrigAnswer, true)
+
+ return entry
+}
+
+// Add implements the [QueryLog] interface for *queryLog.
func (l *queryLog) Add(params *AddParams) {
var isEnabled, fileIsEnabled bool
var memSize uint32
@@ -181,35 +221,7 @@ func (l *queryLog) Add(params *AddParams) {
params.Result = &filtering.Result{}
}
- now := time.Now()
- q := params.Question.Question[0]
- entry := &logEntry{
- Time: now,
-
- QHost: strings.ToLower(q.Name[:len(q.Name)-1]),
- QType: dns.Type(q.Qtype).String(),
- QClass: dns.Class(q.Qclass).String(),
-
- ClientID: params.ClientID,
- ClientProto: params.ClientProto,
-
- Result: *params.Result,
- Upstream: params.Upstream,
-
- IP: params.ClientIP,
-
- Elapsed: params.Elapsed,
-
- Cached: params.Cached,
- AuthenticatedData: params.AuthenticatedData,
- }
-
- if params.ReqECS != nil {
- entry.ReqECS = params.ReqECS.String()
- }
-
- entry.addResponse(params.Answer, false)
- entry.addResponse(params.OrigAnswer, true)
+ entry := newLogEntry(params)
needFlush := false
func() {
diff --git a/internal/querylog/qlog_test.go b/internal/querylog/qlog_test.go
index d8395c89..58fcd704 100644
--- a/internal/querylog/qlog_test.go
+++ b/internal/querylog/qlog_test.go
@@ -6,7 +6,6 @@ import (
"testing"
"github.com/AdguardTeam/AdGuardHome/internal/filtering"
- "github.com/AdguardTeam/dnsproxy/proxyutil"
"github.com/AdguardTeam/golibs/stringutil"
"github.com/AdguardTeam/golibs/testutil"
"github.com/AdguardTeam/golibs/timeutil"
@@ -46,9 +45,10 @@ func TestQueryLog(t *testing.T) {
addEntry(l, "example.com", net.IPv4(1, 1, 1, 4), net.IPv4(2, 2, 2, 4))
type tcAssertion struct {
- num int
- host string
- answer, client net.IP
+ host string
+ answer net.IP
+ client net.IP
+ num int
}
testCases := []struct {
@@ -367,6 +367,6 @@ func assertLogEntry(t *testing.T, entry *logEntry, host string, answer, client n
require.NoError(t, msg.Unpack(entry.Answer))
require.Len(t, msg.Answer, 1)
- ip := proxyutil.IPFromRR(msg.Answer[0]).To16()
- assert.Equal(t, answer, ip)
+ a := testutil.RequireTypeAssert[*dns.A](t, msg.Answer[0])
+ assert.Equal(t, answer, a.A.To16())
}
diff --git a/internal/querylog/qlogfile.go b/internal/querylog/qlogfile.go
index fd6c5226..397840c9 100644
--- a/internal/querylog/qlogfile.go
+++ b/internal/querylog/qlogfile.go
@@ -12,141 +12,181 @@ import (
"github.com/AdguardTeam/golibs/log"
)
-// Timestamp not found errors.
const (
- ErrTSNotFound errors.Error = "ts not found"
- ErrTSTooLate errors.Error = "ts too late"
- ErrTSTooEarly errors.Error = "ts too early"
+ // Timestamp not found errors.
+ errTSNotFound errors.Error = "ts not found"
+ errTSTooLate errors.Error = "ts too late"
+ errTSTooEarly errors.Error = "ts too early"
+
+ // maxEntrySize is the maximum size of a single log entry.
+ //
+ // TODO: Find a way to grow buffer instead of relying on this value when
+ // reading strings.
+ maxEntrySize = 16 * 1024
+
+ // bufferSize should be enough for at least 100 entries.
+ bufferSize = 100 * maxEntrySize
)
-// TODO: Find a way to grow buffer instead of relying on this value when reading strings
-const maxEntrySize = 16 * 1024
-
-// buffer should be enough for at least this number of entries
-const bufferSize = 100 * maxEntrySize
-
-// QLogFile represents a single query log file
-// It allows reading from the file in the reverse order
+// qLogFile represents a single query log file. It allows reading from the
+// file in the reverse order.
//
-// Please note that this is a stateful object.
-// Internally, it contains a pointer to a specific position in the file,
-// and it reads lines in reverse order starting from that position.
-type QLogFile struct {
- file *os.File // the query log file
- position int64 // current position in the file
+// Please note that this is a stateful object. Internally, it contains a
+// pointer to a specific position in the file, and it reads lines in reverse
+// order starting from that position.
+type qLogFile struct {
+ // file is the query log file.
+ file *os.File
- buffer []byte // buffer that we've read from the file
- bufferStart int64 // start of the buffer (in the file)
- bufferLen int // buffer len
+ // buffer that we've read from the file.
+ buffer []byte
- lock sync.Mutex // We use mutex to make it thread-safe
+ // lock is a mutex to make it thread-safe.
+ lock sync.Mutex
+
+ // position is the position in the file.
+ position int64
+
+ // bufferStart is the start of the buffer (in the file).
+ bufferStart int64
+
+ // bufferLen is the length of the buffer.
+ bufferLen int
}
-// NewQLogFile initializes a new instance of the QLogFile
-func NewQLogFile(path string) (*QLogFile, error) {
+// newQLogFile initializes a new instance of the qLogFile.
+func newQLogFile(path string) (qf *qLogFile, err error) {
f, err := os.OpenFile(path, os.O_RDONLY, 0o644)
if err != nil {
return nil, err
}
- return &QLogFile{
- file: f,
- }, nil
+ return &qLogFile{file: f}, nil
+}
+
+// validateQLogLineIdx returns an error if the line index is not valid to
+// continue the search.
+func (q *qLogFile) validateQLogLineIdx(lineIdx, lastProbeLineIdx, ts, fSize int64) (err error) {
+ if lineIdx == lastProbeLineIdx {
+ if lineIdx == 0 {
+ return errTSTooEarly
+ }
+
+ // If we're testing the same line twice then most likely the scope is
+ // too narrow and we won't find anything anymore in any other file.
+ return fmt.Errorf("looking up timestamp %d in %q: %w", ts, q.file.Name(), errTSNotFound)
+ } else if lineIdx == fSize {
+ return errTSTooLate
+ }
+
+ return nil
}
// seekTS performs binary search in the query log file looking for a record
-// with the specified timestamp. Once the record is found, it sets
-// "position" so that the next ReadNext call returned that record.
+// with the specified timestamp. Once the record is found, it sets "position"
+// so that the next ReadNext call returns that record.
//
// The algorithm is rather simple:
-// 1. It starts with the position in the middle of a file
-// 2. Shifts back to the beginning of the line
-// 3. Checks the log record timestamp
-// 4. If it is lower than the timestamp we are looking for,
-// it shifts seek position to 3/4 of the file. Otherwise, to 1/4 of the file.
-// 5. It performs the search again, every time the search scope is narrowed twice.
+// 1. It starts with the position in the middle of a file.
+// 2. Shifts back to the beginning of the line.
+// 3. Checks the log record timestamp.
+// 4. If it is lower than the timestamp we are looking for, it shifts seek
+// position to 3/4 of the file. Otherwise, to 1/4 of the file.
+// 5. It performs the search again, every time the search scope is narrowed
+// twice.
//
// Returns:
-// * It returns the position of the the line with the timestamp we were looking for
-// so that when we call "ReadNext" this line was returned.
-// * Depth of the search (how many times we compared timestamps).
-// * If we could not find it, it returns one of the errors described above.
-func (q *QLogFile) seekTS(timestamp int64) (int64, int, error) {
+// - It returns the position of the line with the timestamp we were looking
+// for so that when we call "ReadNext" this line was returned.
+// - Depth of the search (how many times we compared timestamps).
+// - If we could not find it, it returns one of the errors described above.
+func (q *qLogFile) seekTS(timestamp int64) (pos int64, depth int, err error) {
q.lock.Lock()
defer q.lock.Unlock()
- // Empty the buffer
+ // Empty the buffer.
q.buffer = nil
- // First of all, check the file size
+ // First of all, check the file size.
fileInfo, err := q.file.Stat()
if err != nil {
return 0, 0, err
}
- // Define the search scope
- start := int64(0) // start of the search interval (position in the file)
- end := fileInfo.Size() // end of the search interval (position in the file)
- probe := (end - start) / 2 // probe -- approximate index of the line we'll try to check
+ // Define the search scope.
+
+ // Start of the search interval (position in the file).
+ start := int64(0)
+ // End of the search interval (position in the file).
+ end := fileInfo.Size()
+ // Probe is the approximate index of the line we'll try to check.
+ probe := (end - start) / 2
+
var line string
- var lineIdx int64 // index of the probe line in the file
+ // Index of the probe line in the file.
+ var lineIdx int64
var lineEndIdx int64
- var lastProbeLineIdx int64 // index of the last probe line
+ // Index of the last probe line.
+ var lastProbeLineIdx int64
lastProbeLineIdx = -1
- // Count seek depth in order to detect mistakes
- // If depth is too large, we should stop the search
- depth := 0
-
+ // Count seek depth in order to detect mistakes. If depth is too large,
+ // we should stop the search.
for {
- // Get the line at the specified position
+ // Get the line at the specified position.
line, lineIdx, lineEndIdx, err = q.readProbeLine(probe)
if err != nil {
return 0, depth, err
}
- if lineIdx == lastProbeLineIdx {
- if lineIdx == 0 {
- return 0, depth, ErrTSTooEarly
- }
-
- // If we're testing the same line twice then most likely
- // the scope is too narrow and we won't find anything
- // anymore in any other file.
- return 0, depth, fmt.Errorf("looking up timestamp %d in %q: %w", timestamp, q.file.Name(), ErrTSNotFound)
- } else if lineIdx == fileInfo.Size() {
- return 0, depth, ErrTSTooLate
+ // Check if the line index is invalid.
+ err = q.validateQLogLineIdx(lineIdx, lastProbeLineIdx, timestamp, fileInfo.Size())
+ if err != nil {
+ return 0, depth, err
}
- // Save the last found idx
+ // Save the last found idx.
lastProbeLineIdx = lineIdx
- // Get the timestamp from the query log record
+ // Get the timestamp from the query log record.
ts := readQLogTimestamp(line)
if ts == 0 {
- return 0, depth, fmt.Errorf("looking up timestamp %d in %q: record %q has empty timestamp", timestamp, q.file.Name(), line)
+ return 0, depth, fmt.Errorf(
+ "looking up timestamp %d in %q: record %q has empty timestamp",
+ timestamp,
+ q.file.Name(),
+ line,
+ )
}
if ts == timestamp {
- // Hurray, returning the result
+ // Hurray, returning the result.
break
}
- // Narrow the scope and repeat the search
+ // Narrow the scope and repeat the search.
if ts > timestamp {
- // If the timestamp we're looking for is OLDER than what we found
- // Then the line is somewhere on the LEFT side from the current probe position
+ // If the timestamp we're looking for is OLDER than what we found,
+ // then the line is somewhere on the LEFT side from the current
+ // probe position.
end = lineIdx
} else {
- // If the timestamp we're looking for is NEWER than what we found
- // Then the line is somewhere on the RIGHT side from the current probe position
+ // If the timestamp we're looking for is NEWER than what we found,
+ // then the line is somewhere on the RIGHT side from the current
+ // probe position.
start = lineEndIdx
}
probe = start + (end-start)/2
depth++
if depth >= 100 {
- return 0, depth, fmt.Errorf("looking up timestamp %d in %q: depth %d too high: %w", timestamp, q.file.Name(), depth, ErrTSNotFound)
+ return 0, depth, fmt.Errorf(
+ "looking up timestamp %d in %q: depth %d too high: %w",
+ timestamp,
+ q.file.Name(),
+ depth,
+ errTSNotFound,
+ )
}
}
@@ -154,37 +194,39 @@ func (q *QLogFile) seekTS(timestamp int64) (int64, int, error) {
return q.position, depth, nil
}
-// SeekStart changes the current position to the end of the file
-// Please note that we're reading query log in the reverse order
-// and that's why log start is actually the end of file
+// SeekStart changes the current position to the end of the file. Please note
+// that we're reading the query log in the reverse order and that's why the
+// log start is actually the end of the file.
//
-// Returns nil if we were able to change the current position.
-// Returns error in any other case.
-func (q *QLogFile) SeekStart() (int64, error) {
+// Returns nil if we were able to change the current position. Returns an
+// error in any other case.
+func (q *qLogFile) SeekStart() (int64, error) {
q.lock.Lock()
defer q.lock.Unlock()
- // Empty the buffer
+ // Empty the buffer.
q.buffer = nil
- // First of all, check the file size
+ // First of all, check the file size.
fileInfo, err := q.file.Stat()
if err != nil {
return 0, err
}
- // Place the position to the very end of file
+ // Place the position to the very end of file.
q.position = fileInfo.Size() - 1
if q.position < 0 {
q.position = 0
}
+
return q.position, nil
}
-// ReadNext reads the next line (in the reverse order) from the file
-// and shifts the current position left to the next (actually prev) line.
-// returns io.EOF if there's nothing to read more
-func (q *QLogFile) ReadNext() (string, error) {
+// ReadNext reads the next line (in the reverse order) from the file and shifts
+// the current position left to the next (actually prev) line.
+//
+// Returns io.EOF if there's nothing more to read.
+func (q *qLogFile) ReadNext() (string, error) {
q.lock.Lock()
defer q.lock.Unlock()
@@ -197,35 +239,34 @@ func (q *QLogFile) ReadNext() (string, error) {
return "", err
}
- // Shift position
+ // Shift position.
if lineIdx == 0 {
q.position = 0
} else {
- // there's usually a line break before the line
- // so we should shift one more char left from the line
- // line\nline
+ // There's usually a line break before the line, so we should shift one
+ // more char left from the line "\nline".
q.position = lineIdx - 1
}
return line, err
}
-// Close frees the underlying resources
-func (q *QLogFile) Close() error {
+// Close frees the underlying resources.
+func (q *qLogFile) Close() error {
return q.file.Close()
}
-// readNextLine reads the next line from the specified position
-// this line actually have to END on that position.
+// readNextLine reads the next line from the specified position. This line
+// actually has to END at that position.
//
-// the algorithm is:
-// 1. check if we have the buffer initialized
-// 2. if it is, scan it and look for the line there
-// 3. if we cannot find the line there, read the prev chunk into the buffer
-// 4. read the line from the buffer
-func (q *QLogFile) readNextLine(position int64) (string, int64, error) {
+// The algorithm is:
+// 1. Check if we have the buffer initialized.
+// 2. If it is so, scan it and look for the line there.
+// 3. If we cannot find the line there, read the prev chunk into the buffer.
+// 4. Read the line from the buffer.
+func (q *qLogFile) readNextLine(position int64) (string, int64, error) {
relativePos := position - q.bufferStart
if q.buffer == nil || (relativePos < maxEntrySize && q.bufferStart != 0) {
- // Time to re-init the buffer
+ // Time to re-init the buffer.
err := q.initBuffer(position)
if err != nil {
return "", 0, err
@@ -233,8 +274,7 @@ func (q *QLogFile) readNextLine(position int64) (string, int64, error) {
relativePos = position - q.bufferStart
}
- // Look for the end of the prev line
- // This is where we'll read from
+ // Look for the end of the prev line, this is where we'll read from.
startLine := int64(0)
for i := relativePos - 1; i >= 0; i-- {
if q.buffer[i] == '\n' {
@@ -245,18 +285,19 @@ func (q *QLogFile) readNextLine(position int64) (string, int64, error) {
line := string(q.buffer[startLine:relativePos])
lineIdx := q.bufferStart + startLine
+
return line, lineIdx, nil
}
-// initBuffer initializes the QLogFile buffer.
-// the goal is to read a chunk of file that includes the line with the specified position.
-func (q *QLogFile) initBuffer(position int64) error {
+// initBuffer initializes the qLogFile buffer. The goal is to read a chunk of
+// file that includes the line with the specified position.
+func (q *qLogFile) initBuffer(position int64) error {
q.bufferStart = int64(0)
if position > bufferSize {
q.bufferStart = position - bufferSize
}
- // Seek to this position
+ // Seek to this position.
_, err := q.file.Seek(q.bufferStart, io.SeekStart)
if err != nil {
return err
@@ -271,34 +312,35 @@ func (q *QLogFile) initBuffer(position int64) error {
return err
}
-// readProbeLine reads a line that includes the specified position
-// this method is supposed to be used when we use binary search in the Seek method
-// in the case of consecutive reads, use readNext (it uses a better buffer)
-func (q *QLogFile) readProbeLine(position int64) (string, int64, int64, error) {
- // First of all, we should read a buffer that will include the query log line
- // In order to do this, we'll define the boundaries
+// readProbeLine reads a line that includes the specified position. This
+// method is supposed to be used when we use binary search in the Seek method.
+// For consecutive reads, use readNext instead, since it uses a better buffer.
+func (q *qLogFile) readProbeLine(position int64) (string, int64, int64, error) {
+ // First of all, we should read a buffer that will include the query log
+ // line. In order to do this, we'll define the boundaries.
seekPosition := int64(0)
- relativePos := position // position relative to the buffer we're going to read
+ // Position relative to the buffer we're going to read.
+ relativePos := position
if position > maxEntrySize {
seekPosition = position - maxEntrySize
relativePos = maxEntrySize
}
- // Seek to this position
+ // Seek to this position.
_, err := q.file.Seek(seekPosition, io.SeekStart)
if err != nil {
return "", 0, 0, err
}
- // The buffer size is 2*maxEntrySize
+ // The buffer size is 2*maxEntrySize.
buffer := make([]byte, maxEntrySize*2)
bufferLen, err := q.file.Read(buffer)
if err != nil {
return "", 0, 0, err
}
- // Now start looking for the new line character starting
- // from the relativePos and going left
+ // Now start looking for the new line character starting from the
+ // relativePos and going left.
startLine := int64(0)
for i := relativePos - 1; i >= 0; i-- {
if buffer[i] == '\n' {
@@ -306,7 +348,7 @@ func (q *QLogFile) readProbeLine(position int64) (string, int64, int64, error) {
break
}
}
- // Looking for the end of line now
+ // Looking for the end of line now.
endLine := int64(bufferLen)
lineEndIdx := endLine + seekPosition
for i := relativePos; i < int64(bufferLen); i++ {
@@ -317,13 +359,13 @@ func (q *QLogFile) readProbeLine(position int64) (string, int64, int64, error) {
}
}
- // Finally we can return the string we were looking for
+ // Finally we can return the string we were looking for.
lineIdx := startLine + seekPosition
return string(buffer[startLine:endLine]), lineIdx, lineEndIdx, nil
}
-// readJSONvalue reads a JSON string in form of '"key":"value"'. prefix must be
-// of the form '"key":"' to generate less garbage.
+// readJSONValue reads a JSON string in the form of '"key":"value"'. prefix
+// must be of the form '"key":"' to generate less garbage.
func readJSONValue(s, prefix string) string {
i := strings.Index(s, prefix)
if i == -1 {
@@ -340,7 +382,7 @@ func readJSONValue(s, prefix string) string {
return s[start:end]
}
-// readQLogTimestamp reads the timestamp field from the query log line
+// readQLogTimestamp reads the timestamp field from the query log line.
func readQLogTimestamp(str string) int64 {
val := readJSONValue(str, `"T":"`)
if len(val) == 0 {
@@ -351,10 +393,12 @@ func readQLogTimestamp(str string) int64 {
log.Error("Couldn't find timestamp: %s", str)
return 0
}
+
tm, err := time.Parse(time.RFC3339Nano, val)
if err != nil {
log.Error("Couldn't parse timestamp: %s", val)
return 0
}
+
return tm.UnixNano()
}
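
Reviewer note, not part of the patch: the seekTS rewrite above documents a binary search over byte offsets in the log file, halving the scope on every probe. A compact sketch of the same narrowing idea over an in-memory slice of ascending timestamps; entry values and findTS are illustrative, not the file-offset code from the patch.

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("ts not found")

// findTS performs a binary search over entries sorted by ascending Unix-nano
// timestamps, narrowing the scope by half on every probe, just like the
// file-offset variant in qlogfile.go.
func findTS(entries []int64, ts int64) (idx int, depth int, err error) {
	start, end := 0, len(entries)
	for start < end {
		depth++
		probe := start + (end-start)/2
		switch got := entries[probe]; {
		case got == ts:
			return probe, depth, nil
		case got > ts:
			// Looking for an OLDER record: go LEFT.
			end = probe
		default:
			// Looking for a NEWER record: go RIGHT.
			start = probe + 1
		}
	}

	return 0, depth, errNotFound
}

func main() {
	entries := []int64{100, 200, 300, 400, 500}
	idx, depth, err := findTS(entries, 400)
	fmt.Println(idx, depth, err) // Output: 3 3 <nil>
}
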
diff --git a/internal/querylog/qlogfile_test.go b/internal/querylog/qlogfile_test.go
index 3e32420f..f91d3911 100644
--- a/internal/querylog/qlogfile_test.go
+++ b/internal/querylog/qlogfile_test.go
@@ -72,15 +72,15 @@ func prepareTestFiles(t *testing.T, filesNum, linesNum int) []string {
return files
}
-// newTestQLogFile creates new *QLogFile for tests and registers the required
+// newTestQLogFile creates new *qLogFile for tests and registers the required
// cleanup functions.
-func newTestQLogFile(t *testing.T, linesNum int) (file *QLogFile) {
+func newTestQLogFile(t *testing.T, linesNum int) (file *qLogFile) {
t.Helper()
testFile := prepareTestFiles(t, 1, linesNum)[0]
- // Create the new QLogFile instance.
- file, err := NewQLogFile(testFile)
+ // Create the new qLogFile instance.
+ file, err := newQLogFile(testFile)
require.NoError(t, err)
assert.NotNil(t, file)
@@ -240,7 +240,7 @@ func TestQLogFile_SeekTS_bad(t *testing.T) {
}
}
-func getQLogFileLine(q *QLogFile, lineNumber int) (line string, err error) {
+func getQLogFileLine(q *qLogFile, lineNumber int) (line string, err error) {
if _, err = q.SeekStart(); err != nil {
return line, err
}
@@ -256,7 +256,7 @@ func getQLogFileLine(q *QLogFile, lineNumber int) (line string, err error) {
// Check adding and loading (with filtering) entries from disk and memory.
func TestQLogFile(t *testing.T) {
- // Create the new QLogFile instance.
+ // Create the new qLogFile instance.
q := newTestQLogFile(t, 2)
// Seek to the start.
@@ -285,7 +285,7 @@ func TestQLogFile(t *testing.T) {
assert.Empty(t, line)
}
-func NewTestQLogFileData(t *testing.T, data string) (file *QLogFile) {
+func newTestQLogFileData(t *testing.T, data string) (file *qLogFile) {
f, err := os.CreateTemp(t.TempDir(), "*.txt")
require.NoError(t, err)
testutil.CleanupAndRequireSuccess(t, f.Close)
@@ -293,7 +293,7 @@ func NewTestQLogFileData(t *testing.T, data string) (file *QLogFile) {
_, err = f.WriteString(data)
require.NoError(t, err)
- file, err = NewQLogFile(f.Name())
+ file, err = newQLogFile(f.Name())
require.NoError(t, err)
testutil.CleanupAndRequireSuccess(t, file.Close)
@@ -309,9 +309,9 @@ func TestQLog_Seek(t *testing.T) {
timestamp, _ := time.Parse(time.RFC3339Nano, "2020-08-31T18:44:25.376690873+03:00")
testCases := []struct {
+ wantErr error
name string
delta int
- wantErr error
wantDepth int
}{{
name: "ok",
@@ -321,12 +321,12 @@ func TestQLog_Seek(t *testing.T) {
}, {
name: "too_late",
delta: 2,
- wantErr: ErrTSTooLate,
+ wantErr: errTSTooLate,
wantDepth: 2,
}, {
name: "too_early",
delta: -2,
- wantErr: ErrTSTooEarly,
+ wantErr: errTSTooEarly,
wantDepth: 1,
}}
@@ -338,7 +338,7 @@ func TestQLog_Seek(t *testing.T) {
timestamp.Add(time.Second).Format(time.RFC3339Nano),
)
- q := NewTestQLogFileData(t, data)
+ q := newTestQLogFileData(t, data)
_, depth, err := q.seekTS(timestamp.Add(time.Second * time.Duration(tc.delta)).UnixNano())
require.Truef(t, errors.Is(err, tc.wantErr), "%v", err)
diff --git a/internal/querylog/qlogreader.go b/internal/querylog/qlogreader.go
index 3454a441..610de02f 100644
--- a/internal/querylog/qlogreader.go
+++ b/internal/querylog/qlogreader.go
@@ -9,36 +9,36 @@ import (
"github.com/AdguardTeam/golibs/log"
)
-// QLogReader allows reading from multiple query log files in the reverse order.
+// qLogReader allows reading from multiple query log files in the reverse
+// order.
//
-// Please note that this is a stateful object.
-// Internally, it contains a pointer to a particular query log file, and
-// to a specific position in this file, and it reads lines in reverse order
-// starting from that position.
-type QLogReader struct {
- // qFiles - array with the query log files
- // The order is - from oldest to newest
- qFiles []*QLogFile
+// Please note that this is a stateful object. Internally, it contains a
+// pointer to a particular query log file, and to a specific position in this
+// file, and it reads lines in reverse order starting from that position.
+type qLogReader struct {
+ // qFiles is an array with the query log files. The order is from oldest
+ // to newest.
+ qFiles []*qLogFile
- currentFile int // Index of the current file
+ // currentFile is the index of the current file.
+ currentFile int
}
-// NewQLogReader initializes a QLogReader instance
-// with the specified files
-func NewQLogReader(files []string) (*QLogReader, error) {
- qFiles := make([]*QLogFile, 0)
+// newQLogReader initializes a qLogReader instance with the specified files.
+func newQLogReader(files []string) (*qLogReader, error) {
+ qFiles := make([]*qLogFile, 0)
for _, f := range files {
- q, err := NewQLogFile(f)
+ q, err := newQLogFile(f)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
continue
}
// Close what we've already opened.
- cerr := closeQFiles(qFiles)
- if cerr != nil {
- log.Debug("querylog: closing files: %s", cerr)
+ cErr := closeQFiles(qFiles)
+ if cErr != nil {
+ log.Debug("querylog: closing files: %s", cErr)
}
return nil, err
@@ -47,31 +47,28 @@ func NewQLogReader(files []string) (*QLogReader, error) {
qFiles = append(qFiles, q)
}
- return &QLogReader{
- qFiles: qFiles,
- currentFile: (len(qFiles) - 1),
- }, nil
+ return &qLogReader{qFiles: qFiles, currentFile: len(qFiles) - 1}, nil
}
// seekTS performs binary search of a query log record with the specified
-// timestamp. If the record is found, it sets QLogReader's position to point to
-// that line, so that the next ReadNext call returned this line.
-func (r *QLogReader) seekTS(timestamp int64) (err error) {
+// timestamp. If the record is found, it sets qLogReader's position to point
+// to that line, so that the next ReadNext call returns this line.
+func (r *qLogReader) seekTS(timestamp int64) (err error) {
for i := len(r.qFiles) - 1; i >= 0; i-- {
q := r.qFiles[i]
_, _, err = q.seekTS(timestamp)
if err != nil {
- if errors.Is(err, ErrTSTooEarly) {
+ if errors.Is(err, errTSTooEarly) {
// Look at the next file, since we've reached the end of this
// one. If there is no next file, it's not found.
- err = ErrTSNotFound
+ err = errTSNotFound
continue
- } else if errors.Is(err, ErrTSTooLate) {
+ } else if errors.Is(err, errTSTooLate) {
// Just seek to the start then. timestamp is probably between
// the end of the previous one and the start of this one.
return r.SeekStart()
- } else if errors.Is(err, ErrTSNotFound) {
+ } else if errors.Is(err, errTSNotFound) {
return err
} else {
return fmt.Errorf("seekts: file at index %d: %w", i, err)
@@ -80,7 +77,7 @@ func (r *QLogReader) seekTS(timestamp int64) (err error) {
// The search is finished, and the searched element has been found.
// Update currentFile only, position is already set properly in
- // QLogFile.
+ // qLogFile.
r.currentFile = i
return nil
@@ -93,13 +90,13 @@ func (r *QLogReader) seekTS(timestamp int64) (err error) {
return nil
}
-// SeekStart changes the current position to the end of the newest file
-// Please note that we're reading query log in the reverse order
-// and that's why log start is actually the end of file
+// SeekStart changes the current position to the end of the newest file.
+// Please note that we're reading the query log in the reverse order and
+// that's why the log actually starts at the end of the file.
//
-// Returns nil if we were able to change the current position.
-// Returns error in any other case.
-func (r *QLogReader) SeekStart() error {
+// Returns nil if we were able to change the current position. Returns an
+// error in any other case.
+func (r *qLogReader) SeekStart() error {
if len(r.qFiles) == 0 {
return nil
}
@@ -110,10 +107,12 @@ func (r *QLogReader) SeekStart() error {
return err
}
-// ReadNext reads the next line (in the reverse order) from the query log files.
-// and shifts the current position left to the next (actually prev) line (or the next file).
-// returns io.EOF if there's nothing to read more.
-func (r *QLogReader) ReadNext() (string, error) {
+// ReadNext reads the next line (in the reverse order) from the query log
+// files. It then shifts the current position left to the next (actually
+// prev) line (or the next file).
+//
+// Returns io.EOF if there is nothing more to read.
+func (r *qLogReader) ReadNext() (string, error) {
if len(r.qFiles) == 0 {
return "", io.EOF
}
@@ -122,7 +121,7 @@ func (r *QLogReader) ReadNext() (string, error) {
q := r.qFiles[r.currentFile]
line, err := q.ReadNext()
if err != nil {
- // Shift to the older file
+ // Shift to the older file.
r.currentFile--
if r.currentFile < 0 {
break
@@ -130,10 +129,10 @@ func (r *QLogReader) ReadNext() (string, error) {
q = r.qFiles[r.currentFile]
- // Set it's position to the start right away
+ // Set its position to the start right away.
_, err = q.SeekStart()
- // This is unexpected, return an error right away
+ // This is unexpected, return an error right away.
if err != nil {
return "", err
}
@@ -142,17 +141,17 @@ func (r *QLogReader) ReadNext() (string, error) {
}
}
- // Nothing to read anymore
+ // Nothing to read anymore.
return "", io.EOF
}
-// Close closes the QLogReader
-func (r *QLogReader) Close() error {
+// Close closes the qLogReader.
+func (r *qLogReader) Close() error {
return closeQFiles(r.qFiles)
}
-// closeQFiles - helper method to close multiple QLogFile instances
-func closeQFiles(qFiles []*QLogFile) error {
+// closeQFiles is a helper function to close multiple qLogFile instances.
+func closeQFiles(qFiles []*qLogFile) error {
var errs []error
for _, q := range qFiles {
@@ -163,7 +162,7 @@ func closeQFiles(qFiles []*QLogFile) error {
}
if len(errs) > 0 {
- return errors.List("error while closing QLogReader", errs...)
+ return errors.List("error while closing qLogReader", errs...)
}
return nil
diff --git a/internal/querylog/qlogreader_test.go b/internal/querylog/qlogreader_test.go
index ffdc285b..43bb3d5c 100644
--- a/internal/querylog/qlogreader_test.go
+++ b/internal/querylog/qlogreader_test.go
@@ -10,15 +10,15 @@ import (
"github.com/stretchr/testify/require"
)
-// newTestQLogReader creates new *QLogReader for tests and registers the
+// newTestQLogReader creates new *qLogReader for tests and registers the
// required cleanup functions.
-func newTestQLogReader(t *testing.T, filesNum, linesNum int) (reader *QLogReader) {
+func newTestQLogReader(t *testing.T, filesNum, linesNum int) (reader *qLogReader) {
t.Helper()
testFiles := prepareTestFiles(t, filesNum, linesNum)
- // Create the new QLogReader instance.
- reader, err := NewQLogReader(testFiles)
+ // Create the new qLogReader instance.
+ reader, err := newQLogReader(testFiles)
require.NoError(t, err)
assert.NotNil(t, reader)
@@ -75,9 +75,9 @@ func TestQLogReader_Seek(t *testing.T) {
r := newTestQLogReader(t, 2, 10000)
testCases := []struct {
+ want error
name string
time string
- want error
}{{
name: "not_too_old",
time: "2020-02-18T22:39:35.920973+03:00",
@@ -97,7 +97,7 @@ func TestQLogReader_Seek(t *testing.T) {
}, {
name: "non-existent_long_ago",
time: "2000-02-19T01:23:16.920973+03:00",
- want: ErrTSNotFound,
+ want: errTSNotFound,
}, {
name: "non-existent_far_ahead",
time: "2100-02-19T01:23:16.920973+03:00",
@@ -105,7 +105,7 @@ func TestQLogReader_Seek(t *testing.T) {
}, {
name: "non-existent_but_could",
time: "2020-02-18T22:36:37.000000+03:00",
- want: ErrTSNotFound,
+ want: errTSNotFound,
}}
for _, tc := range testCases {
@@ -125,9 +125,9 @@ func TestQLogReader_ReadNext(t *testing.T) {
r := newTestQLogReader(t, filesNum, linesNum)
testCases := []struct {
+ want error
name string
start int
- want error
}{{
name: "ok",
start: 0,
diff --git a/internal/querylog/search.go b/internal/querylog/search.go
index db2d3474..9102c49f 100644
--- a/internal/querylog/search.go
+++ b/internal/querylog/search.go
@@ -1,9 +1,11 @@
package querylog
import (
+ "fmt"
"io"
"time"
+ "github.com/AdguardTeam/golibs/errors"
"github.com/AdguardTeam/golibs/log"
"golang.org/x/exp/slices"
)
@@ -134,84 +136,112 @@ func (l *queryLog) search(params *searchParams) (entries []*logEntry, oldest tim
return entries, oldest
}
-// searchFiles looks up log records from all log files. It optionally uses the
-// client cache, if provided. searchFiles does not scan more than
-// maxFileScanEntries so callers may need to call it several times to get all
-// results. oldest and total are the time of the oldest processed entry and the
-// total number of processed entries, including discarded ones, correspondingly.
-func (l *queryLog) searchFiles(
- params *searchParams,
- cache clientCache,
-) (entries []*logEntry, oldest time.Time, total int) {
+// seekRecord changes the current position to the next record older than the
+// provided parameter.
+func (r *qLogReader) seekRecord(olderThan time.Time) (err error) {
+ if olderThan.IsZero() {
+ return r.SeekStart()
+ }
+
+ err = r.seekTS(olderThan.UnixNano())
+ if err == nil {
+ // Read to the next record, because we only need the one that goes
+ // after it.
+ _, err = r.ReadNext()
+ }
+
+ return err
+}
+
+// setQLogReader creates a reader with the specified files and sets the
+// position to the next record older than the provided parameter.
+func (l *queryLog) setQLogReader(olderThan time.Time) (qr *qLogReader, err error) {
files := []string{
l.logFile + ".1",
l.logFile,
}
- r, err := NewQLogReader(files)
+ r, err := newQLogReader(files)
if err != nil {
- log.Error("querylog: opening qlog reader: %s", err)
-
- return entries, oldest, 0
- }
-
- defer func() {
- closeErr := r.Close()
- if closeErr != nil {
- log.Error("querylog: closing file: %s", err)
- }
- }()
-
- if params.olderThan.IsZero() {
- err = r.SeekStart()
- } else {
- err = r.seekTS(params.olderThan.UnixNano())
- if err == nil {
- // Read to the next record, because we only need the one that goes
- // after it.
- _, err = r.ReadNext()
- }
+ return nil, fmt.Errorf("opening qlog reader: %s", err)
}
+ err = r.seekRecord(olderThan)
if err != nil {
- log.Debug("querylog: cannot seek to %s: %s", params.olderThan, err)
+ defer func() { err = errors.WithDeferred(err, r.Close()) }()
+ log.Debug("querylog: cannot seek to %s: %s", olderThan, err)
- return entries, oldest, 0
+ return nil, nil
}
- totalLimit := params.offset + params.limit
- oldestNano := int64(0)
+ return r, nil
+}
- // By default, we do not scan more than maxFileScanEntries at once. The
- // idea is to make search calls faster so that the UI could handle it and
- // show something quicker. This behavior can be overridden if
- // maxFileScanEntries is set to 0.
+// readEntries reads entries from the reader up to totalLimit. By default, we
+// do not scan more than maxFileScanEntries at once. The idea is to make
+// search calls faster so that the UI could handle it and show something
+// quicker. This behavior can be overridden if maxFileScanEntries is set to 0.
+func (l *queryLog) readEntries(
+ r *qLogReader,
+ params *searchParams,
+ cache clientCache,
+ totalLimit int,
+) (entries []*logEntry, oldestNano int64, total int) {
for total < params.maxFileScanEntries || params.maxFileScanEntries <= 0 {
- var e *logEntry
- var ts int64
-
- e, ts, err = l.readNextEntry(r, params, cache)
- if err != nil {
- if err == io.EOF {
+ ent, ts, rErr := l.readNextEntry(r, params, cache)
+ if rErr != nil {
+ if rErr == io.EOF {
oldestNano = 0
break
}
- log.Error("querylog: reading next entry: %s", err)
+ log.Error("querylog: reading next entry: %s", rErr)
}
oldestNano = ts
total++
- if e != nil {
- entries = append(entries, e)
- if len(entries) == totalLimit {
- break
- }
+ if ent == nil {
+ continue
+ }
+
+ entries = append(entries, ent)
+ if len(entries) == totalLimit {
+ break
}
}
+ return entries, oldestNano, total
+}
+
+// searchFiles looks up log records from all log files. It optionally uses the
+// client cache, if provided. searchFiles does not scan more than
+// maxFileScanEntries so callers may need to call it several times to get all
+// the results. oldest and total are the time of the oldest processed entry
+// and the total number of processed entries, including discarded ones,
+// correspondingly.
+func (l *queryLog) searchFiles(
+ params *searchParams,
+ cache clientCache,
+) (entries []*logEntry, oldest time.Time, total int) {
+ r, err := l.setQLogReader(params.olderThan)
+ if err != nil {
+ log.Error("querylog: %s", err)
+ }
+
+ if r == nil {
+ return entries, oldest, 0
+ }
+
+ defer func() {
+ if closeErr := r.Close(); closeErr != nil {
+ log.Error("querylog: closing file: %s", closeErr)
+ }
+ }()
+
+ totalLimit := params.offset + params.limit
+ entries, oldestNano, total := l.readEntries(r, params, cache, totalLimit)
if oldestNano != 0 {
oldest = time.Unix(0, oldestNano)
}
@@ -243,11 +273,11 @@ func (f quickMatchClientFinder) findClient(clientID, ip string) (c *Client) {
}
// readNextEntry reads the next log entry and checks if it matches the search
-// criteria. It optionally uses the client cache, if provided. e is nil if the
-// entry doesn't match the search criteria. ts is the timestamp of the
+// criteria. It optionally uses the client cache, if provided. e is nil if
+// the entry doesn't match the search criteria. ts is the timestamp of the
// processed entry.
func (l *queryLog) readNextEntry(
- r *QLogReader,
+ r *qLogReader,
params *searchParams,
cache clientCache,
) (e *logEntry, ts int64, err error) {
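
Reviewer note, not part of the patch: the readEntries extraction above caps the collected results at offset+limit while maxFileScanEntries (0 meaning unbounded) caps the total lines scanned per call. A rough sketch of that loop shape follows; collect and keep are hypothetical stand-ins for the real reader and the search criteria.

package main

import "fmt"

// collect mimics the loop structure of readEntries over a slice of
// pre-decoded "entries"; keep is a stand-in for the search-criteria match.
func collect(entries []int, offset, limit, maxScan int) (kept []int, total int) {
	totalLimit := offset + limit
	for _, e := range entries {
		if maxScan > 0 && total >= maxScan {
			// Scanned enough lines for one call; the caller may call again.
			break
		}
		total++

		if !keep(e) {
			continue
		}

		kept = append(kept, e)
		if len(kept) == totalLimit {
			// Collected enough results to cover offset+limit.
			break
		}
	}

	return kept, total
}

// keep reports whether the entry matches the (hypothetical) search criteria.
func keep(e int) (ok bool) { return e%2 == 0 }

func main() {
	kept, total := collect([]int{1, 2, 3, 4, 5, 6, 7, 8}, 1, 2, 0)
	fmt.Println(kept, total) // Output: [2 4 6] 6
}
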
diff --git a/internal/querylog/searchparams.go b/internal/querylog/searchparams.go
index f18ff561..a0a0ff6c 100644
--- a/internal/querylog/searchparams.go
+++ b/internal/querylog/searchparams.go
@@ -2,18 +2,25 @@ package querylog
import "time"
-// searchParams represent the search query sent by the client
+// searchParams represents the search query sent by the client.
type searchParams struct {
- // searchCriteria - list of search criteria that we use to get filter results
- searchCriteria []searchCriterion
-
- // olderThen - return entries that are older than this value
- // if not set - disregard it and return any value
+ // olderThan makes the search return only the entries older than this
+ // value. If not set, it is disregarded and entries of any age are returned.
olderThan time.Time
- offset int // offset for the search
- limit int // limit the number of records returned
- maxFileScanEntries int // maximum log entries to scan in query log files. if 0 - no limit
+ // searchCriteria is a list of search criteria that we use to filter the
+ // results.
+ searchCriteria []searchCriterion
+
+ // offset for the search.
+ offset int
+
+ // limit is the maximum number of records to return.
+ limit int
+
+ // maxFileScanEntries is the maximum number of log entries to scan in the
+ // query log files. If set to 0, there is no limit.
+ maxFileScanEntries int
}
// newSearchParams - creates an empty instance of searchParams
diff --git a/internal/tools/go.mod b/internal/tools/go.mod
index 9795d39d..bf6a766d 100644
--- a/internal/tools/go.mod
+++ b/internal/tools/go.mod
@@ -8,16 +8,16 @@ require (
github.com/gordonklaus/ineffassign v0.0.0-20230107090616-13ace0543b28
github.com/kisielk/errcheck v1.6.3
github.com/kyoh86/looppointer v0.2.1
- github.com/securego/gosec/v2 v2.15.0
- golang.org/x/tools v0.8.0
- golang.org/x/vuln v0.0.0-20230418010118-28ba02ac73db
+ github.com/securego/gosec/v2 v2.16.0
+ golang.org/x/tools v0.9.3
+ golang.org/x/vuln v0.1.0
honnef.co/go/tools v0.4.3
mvdan.cc/gofumpt v0.5.0
mvdan.cc/unparam v0.0.0-20230312165513-e84e2d14e3b8
)
require (
- github.com/BurntSushi/toml v1.2.1 // indirect
+ github.com/BurntSushi/toml v1.3.1 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gookit/color v1.5.3 // indirect
@@ -25,9 +25,9 @@ require (
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
golang.org/x/exp v0.0.0-20230321023759-10a507213a29 // indirect
- golang.org/x/exp/typeparams v0.0.0-20230321023759-10a507213a29 // indirect
+ golang.org/x/exp/typeparams v0.0.0-20230522175609-2e198f4a06a1 // indirect
golang.org/x/mod v0.10.0 // indirect
- golang.org/x/sync v0.1.0 // indirect
- golang.org/x/sys v0.7.0 // indirect
+ golang.org/x/sync v0.2.0 // indirect
+ golang.org/x/sys v0.8.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/internal/tools/go.sum b/internal/tools/go.sum
index 21f23c63..93724bea 100644
--- a/internal/tools/go.sum
+++ b/internal/tools/go.sum
@@ -1,17 +1,19 @@
-github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak=
-github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
+github.com/BurntSushi/toml v1.3.1 h1:rHnDkSK+/g6DlREUK73PkmIs60pqrnuduK+JmP++JmU=
+github.com/BurntSushi/toml v1.3.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo=
github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
+github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
+github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/golangci/misspell v0.4.0 h1:KtVB/hTK4bbL/S6bs64rYyk8adjmh1BygbBiaAiX+a0=
github.com/golangci/misspell v0.4.0/go.mod h1:W6O/bwV6lGDxUCChm2ykw9NQdd5bYd1Xkjo88UcWyJc=
github.com/google/go-cmdtest v0.4.1-0.20220921163831-55ab3332a786 h1:rcv+Ippz6RAtvaGgKxc+8FQIpxHgsF+HBzPyYL2cyVU=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38 h1:yAJXTCF9TqKcTiHJAE8dj7HMvPfh66eeA2JYW7eFpSE=
github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -29,13 +31,13 @@ github.com/kyoh86/nolint v0.0.1 h1:GjNxDEkVn2wAxKHtP7iNTrRxytRZ1wXxLV5j4XzGfRU=
github.com/kyoh86/nolint v0.0.1/go.mod h1:1ZiZZ7qqrZ9dZegU96phwVcdQOMKIqRzFJL3ewq9gtI=
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA=
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8=
-github.com/onsi/ginkgo/v2 v2.8.0 h1:pAM+oBNPrpXRs+E/8spkeGx9QgekbRVyr74EUvRVOUI=
-github.com/onsi/gomega v1.26.0 h1:03cDLK28U6hWvCAns6NeydX3zIm4SF3ci69ulidS32Q=
+github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE=
+github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
-github.com/securego/gosec/v2 v2.15.0 h1:v4Ym7FF58/jlykYmmhZ7mTm7FQvN/setNm++0fgIAtw=
-github.com/securego/gosec/v2 v2.15.0/go.mod h1:VOjTrZOkUtSDt2QLSJmQBMWnvwiQPEjg0l+5juIqGk8=
+github.com/securego/gosec/v2 v2.16.0 h1:Pi0JKoasQQ3NnoRao/ww/N/XdynIB9NRYYZT5CyOs5U=
+github.com/securego/gosec/v2 v2.16.0/go.mod h1:xvLcVZqUfo4aAQu56TNv7/Ltz6emAOQAEsrZrt7uGlI=
github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
@@ -49,8 +51,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/exp v0.0.0-20230321023759-10a507213a29 h1:ooxPy7fPvB4kwsA2h+iBNHkAbp/4JxTSwCmvdjEYmug=
golang.org/x/exp v0.0.0-20230321023759-10a507213a29/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
-golang.org/x/exp/typeparams v0.0.0-20230321023759-10a507213a29 h1:e7LhZmJ631l59keHP9ssC3sgSn3/oiEHKHKXDkimURY=
-golang.org/x/exp/typeparams v0.0.0-20230321023759-10a507213a29/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
+golang.org/x/exp/typeparams v0.0.0-20230522175609-2e198f4a06a1 h1:pnP8r+W8Fm7XJ8CWtXi4S9oJmPBTrkfYN/dNbaPj6Y4=
+golang.org/x/exp/typeparams v0.0.0-20230522175609-2e198f4a06a1/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
@@ -62,12 +64,12 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
+golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
-golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
+golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -77,23 +79,23 @@ golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
-golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
+golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20201007032633-0806396f153e/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
-golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
-golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
-golang.org/x/vuln v0.0.0-20230418010118-28ba02ac73db h1:tLxfII6jPR3mfwEMkyOakawu+Lldo9hIA7vliXnDZYg=
-golang.org/x/vuln v0.0.0-20230418010118-28ba02ac73db/go.mod h1:64LpnL2PuSMzFYeCmJjYiRbroOUG9aCZYznINnF5PHE=
+golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM=
+golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
+golang.org/x/vuln v0.1.0 h1:9GRdj6wAIkDrsMevuolY+SXERPjQPp2P1ysYA0jpZe0=
+golang.org/x/vuln v0.1.0/go.mod h1:/YuzZYjGbwB8y19CisAppfyw3uTZnuCz3r+qgx/QRzU=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/openapi/CHANGELOG.md b/openapi/CHANGELOG.md
index 922788bb..7c68a47b 100644
--- a/openapi/CHANGELOG.md
+++ b/openapi/CHANGELOG.md
@@ -4,6 +4,28 @@
## v0.108.0: API changes
+## v0.107.30: API changes
+
+### `POST /control/version.json` and `GET /control/dhcp/interfaces` content type
+
+* The value of the `Content-Type` header in the `POST /control/version.json` and
+ `GET /control/dhcp/interfaces` HTTP APIs is now correctly set to
+ `application/json` as opposed to `text/plain`.
+
+### New HTTP API `PUT /control/rewrite/update`
+
+* The new `PUT /control/rewrite/update` HTTP API allows rewrite rule updates.
+ It accepts a JSON object with the following format:
+
+```json
+{
+ "target": {"domain":"example.com","answer":"answer-to-update"},
+ "update": {"domain":"example.com","answer":"new-answer"}
+}
+```
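+
+A minimal Go sketch of calling the new endpoint is shown below. The listen
+address (`127.0.0.1:3000`) and the basic-auth credentials are placeholder
+assumptions for illustration, not values defined by this API.
+
+```go
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+)
+
+func main() {
+	// The request body follows the documented format above.
+	body := []byte(`{
+  "target": {"domain": "example.com", "answer": "answer-to-update"},
+  "update": {"domain": "example.com", "answer": "new-answer"}
+}`)
+
+	req, err := http.NewRequest(
+		http.MethodPut,
+		"http://127.0.0.1:3000/control/rewrite/update",
+		bytes.NewReader(body),
+	)
+	if err != nil {
+		panic(err)
+	}
+
+	req.Header.Set("Content-Type", "application/json")
+	// Placeholder credentials; use whatever authentication the instance requires.
+	req.SetBasicAuth("admin", "password")
+
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	fmt.Println("status:", resp.Status)
+}
+```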
+
+
+
## v0.107.29: API changes
### `GET /control/clients` And `GET /control/clients/find`
@@ -16,6 +38,8 @@
set AdGuard Home will use default value (false). It can be changed in the
future versions.
+
+
## v0.107.27: API changes
### The new optional fields `"edns_cs_use_custom"` and `"edns_cs_custom_ip"` in `DNSConfig`
diff --git a/scripts/make/go-lint.sh b/scripts/make/go-lint.sh
index 52d20707..cf07e3a9 100644
--- a/scripts/make/go-lint.sh
+++ b/scripts/make/go-lint.sh
@@ -35,7 +35,7 @@ set -f -u
go_version="$( "${GO:-go}" version )"
readonly go_version
-go_min_version='go1.19.8'
+go_min_version='go1.19.10'
go_version_msg="
warning: your go version (${go_version}) is different from the recommended minimal one (${go_min_version}).
if you have the version installed, please set the GO environment variable.
@@ -159,30 +159,7 @@ run_linter "$GO" vet ./...
run_linter govulncheck ./...
-# Apply more lax standards to the code we haven't properly refactored yet.
-run_linter gocyclo --over 13\
- ./internal/dhcpd\
- ./internal/home/\
- ./internal/querylog/\
- ;
-
-# Apply the normal standards to new or somewhat refactored code.
-run_linter gocyclo --over 10\
- ./internal/aghio/\
- ./internal/aghnet/\
- ./internal/aghos/\
- ./internal/aghtest/\
- ./internal/dnsforward/\
- ./internal/filtering/\
- ./internal/stats/\
- ./internal/tools/\
- ./internal/updater/\
- ./internal/version/\
- ./scripts/blocked-services/\
- ./scripts/vetted-filters/\
- ./scripts/translations/\
- ./main.go\
- ;
+run_linter gocyclo --over 10 .
run_linter ineffassign ./...