Pull request 1928: 1453-stats-tests
Updates #1453.

Squashed commit of the following:

- f08f68ef5493dad03d3eb120d886f2df1af28be6 (merge: b70b088af 54aee2272), Stanislav Chzhen <s.chzhen@adguard.com>, Tue Aug 8 19:04:06 2023 +0300: Merge branch 'master' into 1453-stats-tests
- b70b088af0fdc7d6d048d688160048bad1fceb12, Stanislav Chzhen <s.chzhen@adguard.com>, Thu Aug 3 19:32:04 2023 +0300: stats: imp code
- c341012ba61894c255c1868624be1cac0d26a6fa (merge: a2ac8c34e 5eb3cd0f9), Stanislav Chzhen <s.chzhen@adguard.com>, Thu Aug 3 13:36:24 2023 +0300: Merge branch 'master' into 1453-stats-tests
- a2ac8c34ee32606ca5e259c3e2a47db0dd5858de, Ildar Kamalov <ik@adguard.com>, Thu Aug 3 13:25:12 2023 +0300: client: add top upstreams and average processing time tables
- 11118947f9bf945be0b056f8475cf3b848c6e66e, Stanislav Chzhen <s.chzhen@adguard.com>, Tue Aug 1 17:24:57 2023 +0300: stats: imp docs
- 904cf81d02a1f327b9647fa7ad9e181cfabb68a4, Stanislav Chzhen <s.chzhen@adguard.com>, Mon Jul 31 17:34:06 2023 +0300: stats: imp code
- 34f0c96dd5865d1470385322a88842dd0b3d996d, Stanislav Chzhen <s.chzhen@adguard.com>, Mon Jul 31 15:43:46 2023 +0300: all: imp docs
- 2cb2d0d8bef3580f64bc25c414fe9b5ea6b9f997, Stanislav Chzhen <s.chzhen@adguard.com>, Fri Jul 28 17:24:31 2023 +0300: all: imp code
- 5251a899fecc21e50a0ba06042f96f5b404e196a (merge: b6c2b12d4 300821a7f), Stanislav Chzhen <s.chzhen@adguard.com>, Thu Jul 27 20:34:39 2023 +0300: Merge branch 'master' into 1453-stats-tests
- b6c2b12d4425012efd73549c3a426735f3a677cd, Stanislav Chzhen <s.chzhen@adguard.com>, Thu Jul 27 20:32:18 2023 +0300: stats: imp code
- 5546b82a78326f9cc6d8c87df5083f8fc66a0178 (merge: 8a3d6b1b4 5f8fa006c), Stanislav Chzhen <s.chzhen@adguard.com>, Thu Jul 27 14:24:01 2023 +0300: Merge branch 'master' into 1453-stats-tests
- 8a3d6b1b49ce189f95adfa7406a34108e885e676, Stanislav Chzhen <s.chzhen@adguard.com>, Thu Jul 27 14:17:47 2023 +0300: all: imp code
- 2a48001e275e3cdcf70e13e1c9cebd4e502f3259, Stanislav Chzhen <s.chzhen@adguard.com>, Tue Jul 25 18:27:20 2023 +0300: all: imp docs
- 3dd21890175af32a3368378f7e013383f6d040ec, Stanislav Chzhen <s.chzhen@adguard.com>, Tue Jul 25 16:00:39 2023 +0300: all: imp naming
- 6124456fc3149b71f6bd58d35ecf24eb6cf40d5d, Stanislav Chzhen <s.chzhen@adguard.com>, Thu Jul 20 16:15:56 2023 +0300: all: add upstreams avg processing time
- 187ad0c77a81c9fd95c24e23141355db2e83e50d, Stanislav Chzhen <s.chzhen@adguard.com>, Tue Jul 18 16:42:19 2023 +0300: all: add top upstreams
This commit is contained in: parent 54aee22720, commit c47509fabc
19 changed files with 686 additions and 140 deletions
@@ -25,6 +25,8 @@ NOTE: Add new changes BELOW THIS COMMENT.

### Added

- Two new metrics showing total number of responses from each upstream DNS
  server and their average processing time in the Web UI ([#1453]).
- The ability to set the port for the `pprof` debug API, see configuration
  changes below.

@@ -59,6 +61,7 @@ In this release, the schema version has changed from 24 to 25.

- Panic on shutting down while DNS requests are in process of filtering
  ([#5948]).

[#1453]: https://github.com/AdguardTeam/AdGuardHome/issues/1453
[#5948]: https://github.com/AdguardTeam/AdGuardHome/issues/5948

<!--

@@ -125,6 +125,8 @@
    "top_clients": "Top clients",
    "no_clients_found": "No clients found",
    "general_statistics": "General statistics",
    "top_upstreams": "Top upstreams",
    "no_upstreams_data_found": "No upstreams data found",
    "number_of_dns_query_days": "The number of DNS queries processed for the last {{count}} day",
    "number_of_dns_query_days_plural": "The number of DNS queries processed for the last {{count}} days",
    "number_of_dns_query_24_hours": "The number of DNS queries processed for the last 24 hours",

@@ -134,6 +136,7 @@
    "enforced_save_search": "Enforced safe search",
    "number_of_dns_query_to_safe_search": "The number of DNS requests to search engines for which Safe Search was enforced",
    "average_processing_time": "Average processing time",
    "processing_time": "Processing time",
    "average_processing_time_hint": "Average time in milliseconds on processing a DNS request",
    "block_domain_use_filters_and_hosts": "Block domains using filters and hosts files",
    "filters_block_toggle_hint": "You can setup blocking rules in the <a>Filters</a> settings.",

@@ -158,6 +161,7 @@
    "upstream_dns_configured_in_file": "Configured in {{path}}",
    "test_upstream_btn": "Test upstreams",
    "upstreams": "Upstreams",
    "upstream": "Upstream",
    "apply_btn": "Apply",
    "disabled_filtering_toast": "Disabled filtering",
    "enabled_filtering_toast": "Enabled filtering",

@@ -56,6 +56,8 @@ export const getStats = () => async (dispatch) => {
            top_clients: topClientsWithInfo,
            top_queried_domains: normalizeTopStats(stats.top_queried_domains),
            avg_processing_time: secondsToMilliseconds(stats.avg_processing_time),
            top_upstreams_responses: normalizeTopStats(stats.top_upstreams_responses),
            top_upstrems_avg_time: normalizeTopStats(stats.top_upstreams_avg_time),
        };

        dispatch(getStatsSuccess(normalizedStats));

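Across this diff the processing time changes units several times: the stats unit stores a microsecond sum, `GET /control/stats` reports seconds, and the dashboard shows milliseconds (see `secondsToMilliseconds` above and `microsecondsToSeconds` further down). A minimal standalone sketch of that chain, using a made-up duration rather than values from this PR:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical duration of a single DNS request.
	elapsed := 123456 * time.Microsecond

	// The stats unit accumulates microseconds (see unit.timeSum below).
	micros := uint64(elapsed.Microseconds()) // 123456

	// The /control/stats handler reports seconds as a float64.
	seconds := float64(micros) * 1e-6 // 0.123456

	// The dashboard converts seconds back to milliseconds for display.
	millis := seconds * 1000 // 123.456

	fmt.Println(micros, seconds, millis)
}
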
79 client/src/components/Dashboard/UpstreamAvgTime.js Normal file

@@ -0,0 +1,79 @@
import React from 'react';
import ReactTable from 'react-table';
import PropTypes from 'prop-types';
import round from 'lodash/round';
import { withTranslation, Trans } from 'react-i18next';

import Card from '../ui/Card';
import DomainCell from './DomainCell';

const TimeCell = ({ value }) => {
    if (!value) {
        return '–';
    }

    const valueInMilliseconds = round(value * 1000);

    return (
        <div className="logs__row o-hidden">
            <span className="logs__text logs__text--full" title={valueInMilliseconds}>
                {valueInMilliseconds} ms
            </span>
        </div>
    );
};

TimeCell.propTypes = {
    value: PropTypes.oneOfType([
        PropTypes.string,
        PropTypes.number,
    ]),
};

const UpstreamAvgTime = ({
    t,
    refreshButton,
    topUpstreamsAvgTime,
    subtitle,
}) => (
    <Card
        title={t('average_processing_time')}
        subtitle={subtitle}
        bodyType="card-table"
        refresh={refreshButton}
    >
        <ReactTable
            data={topUpstreamsAvgTime.map(({ name: domain, count }) => ({
                domain,
                count,
            }))}
            columns={[
                {
                    Header: <Trans>upstream</Trans>,
                    accessor: 'domain',
                    Cell: DomainCell,
                },
                {
                    Header: <Trans>processing_time</Trans>,
                    accessor: 'count',
                    maxWidth: 190,
                    Cell: TimeCell,
                },
            ]}
            showPagination={false}
            noDataText={t('no_upstreams_data_found')}
            minRows={6}
            defaultPageSize={100}
            className="-highlight card-table-overflow--limited stats__table"
        />
    </Card>
);

UpstreamAvgTime.propTypes = {
    topUpstreamsAvgTime: PropTypes.array.isRequired,
    refreshButton: PropTypes.node.isRequired,
    subtitle: PropTypes.string.isRequired,
    t: PropTypes.func.isRequired,
};

export default withTranslation()(UpstreamAvgTime);

76 client/src/components/Dashboard/UpstreamResponses.js Normal file

@@ -0,0 +1,76 @@
import React from 'react';
import ReactTable from 'react-table';
import PropTypes from 'prop-types';
import { withTranslation, Trans } from 'react-i18next';

import Card from '../ui/Card';
import Cell from '../ui/Cell';
import DomainCell from './DomainCell';

import { getPercent } from '../../helpers/helpers';
import { STATUS_COLORS } from '../../helpers/constants';

const CountCell = (totalBlocked) => (
    function cell(row) {
        const { value } = row;
        const percent = getPercent(totalBlocked, value);

        return (
            <Cell
                value={value}
                percent={percent}
                color={STATUS_COLORS.green}
            />
        );
    }
);

const UpstreamResponses = ({
    t,
    refreshButton,
    topUpstreamsResponses,
    dnsQueries,
    subtitle,
}) => (
    <Card
        title={t('top_upstreams')}
        subtitle={subtitle}
        bodyType="card-table"
        refresh={refreshButton}
    >
        <ReactTable
            data={topUpstreamsResponses.map(({ name: domain, count }) => ({
                domain,
                count,
            }))}
            columns={[
                {
                    Header: <Trans>upstream</Trans>,
                    accessor: 'domain',
                    Cell: DomainCell,
                },
                {
                    Header: <Trans>requests_count</Trans>,
                    accessor: 'count',
                    maxWidth: 190,
                    Cell: CountCell(dnsQueries),
                },
            ]}
            showPagination={false}
            noDataText={t('no_upstreams_data_found')}
            minRows={6}
            defaultPageSize={100}
            className="-highlight card-table-overflow--limited stats__table"
        />
    </Card>
);

UpstreamResponses.propTypes = {
    topUpstreamsResponses: PropTypes.array.isRequired,
    dnsQueries: PropTypes.number.isRequired,
    refreshButton: PropTypes.node.isRequired,
    subtitle: PropTypes.string.isRequired,
    t: PropTypes.func.isRequired,
};

export default withTranslation()(UpstreamResponses);

@@ -21,6 +21,8 @@ import PageTitle from '../ui/PageTitle';
import Loading from '../ui/Loading';
import './Dashboard.css';
import Dropdown from '../ui/Dropdown';
import UpstreamResponses from './UpstreamResponses';
import UpstreamAvgTime from './UpstreamAvgTime';

const Dashboard = ({
    getAccessList,

@@ -136,12 +138,12 @@ const Dashboard = ({
        <PageTitle title={t('dashboard')} containerClass="page-title--dashboard">
            <div className="page-title__protection">
                <button
                    type="button"
                    className={buttonClass}
                    onClick={() => {
                        toggleProtection(protectionEnabled);
                    }}
                    disabled={processingProtection}
                >
                    {protectionDisabledDuration
                        ? `${t('enable_protection_timer')} ${getRemaningTimeText(protectionDisabledDuration)}`
@@ -160,9 +162,9 @@
                </Dropdown>}
            </div>
            <button
                type="button"
                className="btn btn-outline-primary btn-sm"
                onClick={getAllStats}
            >
                <Trans>refresh_statics</Trans>
            </button>

@@ -185,53 +187,68 @@
                    </div>
                )}
                <Statistics
                    interval={msToDays(stats.interval)}
                    dnsQueries={stats.dnsQueries}
                    blockedFiltering={stats.blockedFiltering}
                    replacedSafebrowsing={stats.replacedSafebrowsing}
                    replacedParental={stats.replacedParental}
                    numDnsQueries={stats.numDnsQueries}
                    numBlockedFiltering={stats.numBlockedFiltering}
                    numReplacedSafebrowsing={stats.numReplacedSafebrowsing}
                    numReplacedParental={stats.numReplacedParental}
                    refreshButton={refreshButton}
                />
            </div>
            <div className="col-lg-6">
                <Counters
                    subtitle={subtitle}
                    refreshButton={refreshButton}
                />
            </div>
            <div className="col-lg-6">
                <Clients
                    subtitle={subtitle}
                    dnsQueries={stats.numDnsQueries}
                    topClients={stats.topClients}
                    clients={dashboard.clients}
                    autoClients={dashboard.autoClients}
                    refreshButton={refreshButton}
                    processingAccessSet={access.processingSet}
                    disallowedClients={access.disallowed_clients}
                />
            </div>
            <div className="col-lg-6">
                <QueriedDomains
                    subtitle={subtitle}
                    dnsQueries={stats.numDnsQueries}
                    topQueriedDomains={stats.topQueriedDomains}
                    refreshButton={refreshButton}
                />
            </div>
            <div className="col-lg-6">
                <BlockedDomains
                    subtitle={subtitle}
                    topBlockedDomains={stats.topBlockedDomains}
                    blockedFiltering={stats.numBlockedFiltering}
                    replacedSafebrowsing={stats.numReplacedSafebrowsing}
                    replacedSafesearch={stats.numReplacedSafesearch}
                    replacedParental={stats.numReplacedParental}
                    refreshButton={refreshButton}
                />
            </div>
            <div className="col-lg-6">
                <UpstreamResponses
                    subtitle={subtitle}
                    dnsQueries={stats.numDnsQueries}
                    topUpstreamsResponses={stats.topUpstreamsResponses}
                    refreshButton={refreshButton}
                />
            </div>
            <div className="col-lg-6">
                <UpstreamAvgTime
                    subtitle={subtitle}
                    topUpstreamsAvgTime={stats.topUpstreamsAvgTime}
                    refreshButton={refreshButton}
                />
            </div>
        </div>}

@@ -1,25 +1,39 @@
import React from 'react';
import PropTypes from 'prop-types';

import LogsSearchLink from './LogsSearchLink';
import { formatNumber } from '../../helpers/helpers';

const Cell = ({
    value, percent, color, search,
}) => <div className="stats__row">
    <div className="stats__row-value mb-1">
        <strong><LogsSearchLink search={search}>{formatNumber(value)}</LogsSearchLink></strong>
        <small className="ml-3 text-muted">{percent}%</small>
    value,
    percent,
    color,
    search,
}) => (
    <div className="stats__row">
        <div className="stats__row-value mb-1">
            <strong>
                {search ? (
                    <LogsSearchLink search={search}>
                        {formatNumber(value)}
                    </LogsSearchLink>
                ) : (
                    formatNumber(value)
                )}
            </strong>
            <small className="ml-3 text-muted">{percent}%</small>
        </div>
        <div className="progress progress-xs">
            <div
                className="progress-bar"
                style={{
                    width: `${percent}%`,
                    backgroundColor: color,
                }}
            />
        </div>
    </div>
    <div className="progress progress-xs">
        <div
            className="progress-bar"
            style={{
                width: `${percent}%`,
                backgroundColor: color,
            }}
        />
    </div>
</div>;
);

Cell.propTypes = {
    value: PropTypes.number.isRequired,

@@ -58,6 +58,8 @@ const stats = handleActions(
                num_replaced_safebrowsing: numReplacedSafebrowsing,
                num_replaced_safesearch: numReplacedSafesearch,
                avg_processing_time: avgProcessingTime,
                top_upstreams_responses: topUpstreamsResponses,
                top_upstrems_avg_time: topUpstreamsAvgTime,
            } = payload;

            const newState = {

@@ -77,6 +79,8 @@ const stats = handleActions(
                numReplacedSafebrowsing,
                numReplacedSafesearch,
                avgProcessingTime,
                topUpstreamsResponses,
                topUpstreamsAvgTime,
            };

            return newState;

@@ -139,10 +139,14 @@ func (s *Server) updateStats(
	clientIP string,
) {
	pctx := ctx.proxyCtx
	e := stats.Entry{
	e := &stats.Entry{
		Domain: aghnet.NormalizeDomain(pctx.Req.Question[0].Name),
		Result: stats.RNotFiltered,
		Time:   uint32(elapsed / 1000),
		Time:   elapsed,
	}

	if pctx.Upstream != nil {
		e.Upstream = pctx.Upstream.Address()
	}

	if clientID := ctx.clientID; clientID != "" {

@@ -41,11 +41,11 @@ type testStats struct {
	// without actually implementing all methods.
	stats.Interface

	lastEntry stats.Entry
	lastEntry *stats.Entry
}

// Update implements the [stats.Interface] interface for *testStats.
func (l *testStats) Update(e stats.Entry) {
func (l *testStats) Update(e *stats.Entry) {
	if e.Domain == "" {
		return
	}

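The `testStats` helper above uses a common Go trick: embedding the interface in a struct so the test only has to implement the methods it cares about. A self-contained sketch of the same pattern with hypothetical names (not the actual AdGuardHome types):

package main

import "fmt"

// Recorder stands in for a larger interface such as stats.Interface.
type Recorder interface {
	Update(domain string)
	Close() error
}

// testRecorder embeds Recorder, so it satisfies the whole interface while
// overriding only Update; calling the unimplemented Close would panic.
type testRecorder struct {
	Recorder

	lastDomain string
}

// Update records the last value it was called with.
func (r *testRecorder) Update(domain string) {
	r.lastDomain = domain
}

func main() {
	var rec Recorder = &testRecorder{}
	rec.Update("example.org")
	fmt.Println(rec.(*testRecorder).lastDomain) // example.org
}
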
@@ -19,6 +19,9 @@ import (
// The key is either a client's address or a requested address.
type topAddrs = map[string]uint64

// topAddrsFloat is like [topAddrs] but the value is float64 number.
type topAddrsFloat = map[string]float64

// StatsResp is a response to the GET /control/stats.
type StatsResp struct {
	TimeUnits string `json:"time_units"`

@@ -27,6 +30,9 @@ type StatsResp struct {
	TopClients []topAddrs `json:"top_clients"`
	TopBlocked []topAddrs `json:"top_blocked_domains"`

	TopUpstreamsResponses []topAddrs      `json:"top_upstreams_responses"`
	TopUpstreamsAvgTime   []topAddrsFloat `json:"top_upstreams_avg_time"`

	DNSQueries []uint64 `json:"dns_queries"`

	BlockedFiltering []uint64 `json:"blocked_filtering"`

@@ -5,7 +5,6 @@ package stats
import (
	"fmt"
	"io"
	"net"
	"net/netip"
	"os"
	"sync"

@@ -80,7 +79,7 @@ type Interface interface {
	io.Closer

	// Update collects the incoming statistics data.
	Update(e Entry)
	Update(e *Entry)

	// GetTopClientIP returns at most limit IP addresses corresponding to the
	// clients with the most number of requests.

@@ -225,7 +224,7 @@ func (s *StatsCtx) Start() {
	go s.periodicFlush()
}

// Close implements the io.Closer interface for *StatsCtx.
// Close implements the [io.Closer] interface for *StatsCtx.
func (s *StatsCtx) Close() (err error) {
	defer func() { err = errors.Annotate(err, "stats: closing: %w") }()

@@ -256,8 +255,9 @@ func (s *StatsCtx) Close() (err error) {
	return udb.flushUnitToDB(tx, s.curr.id)
}

// Update implements the Interface interface for *StatsCtx.
func (s *StatsCtx) Update(e Entry) {
// Update implements the [Interface] interface for *StatsCtx. e must not be
// nil.
func (s *StatsCtx) Update(e *Entry) {
	s.confMu.Lock()
	defer s.confMu.Unlock()

@@ -265,8 +265,9 @@ func (s *StatsCtx) Update(e Entry) {
		return
	}

	if e.Result == 0 || e.Result >= resultLast || e.Domain == "" || e.Client == "" {
		log.Debug("stats: malformed entry")
	err := e.validate()
	if err != nil {
		log.Debug("stats: updating: validating entry: %s", err)

		return
	}

@@ -280,15 +281,10 @@ func (s *StatsCtx) Update(e Entry) {
		return
	}

	clientID := e.Client
	if ip := net.ParseIP(clientID); ip != nil {
		clientID = ip.String()
	}

	s.curr.add(e.Result, e.Domain, clientID, uint64(e.Time))
	s.curr.add(e)
}

// WriteDiskConfig implements the Interface interface for *StatsCtx.
// WriteDiskConfig implements the [Interface] interface for *StatsCtx.
func (s *StatsCtx) WriteDiskConfig(dc *Config) {
	s.confMu.RLock()
	defer s.confMu.RUnlock()

@@ -412,6 +408,12 @@ func (s *StatsCtx) flush() (cont bool, sleepFor time.Duration) {
		return true, time.Second
	}

	return s.flushDB(id, limit, ptr)
}

// flushDB flushes the unit to the database. confMu and currMu are expected to
// be locked.
func (s *StatsCtx) flushDB(id, limit uint32, ptr *unit) (cont bool, sleepFor time.Duration) {
	db := s.db.Load()
	if db == nil {
		return true, 0

@@ -50,11 +50,11 @@ func TestStats_races(t *testing.T) {
	testutil.CleanupAndRequireSuccess(t, s.Close)

	writeFunc := func(start, fin *sync.WaitGroup, waitCh <-chan unit, i int) {
		e := Entry{
		e := &Entry{
			Domain: fmt.Sprintf("example-%d.org", i),
			Client: fmt.Sprintf("client_%d", i),
			Result: Result(i)%(resultLast-1) + 1,
			Time:   uint32(time.Since(startTime).Milliseconds()),
			Time:   time.Since(startTime),
		}

		start.Done()

@@ -9,6 +9,7 @@ import (
	"path/filepath"
	"sync/atomic"
	"testing"
	"time"

	"github.com/AdguardTeam/AdGuardHome/internal/stats"
	"github.com/AdguardTeam/golibs/netutil"

@@ -72,24 +73,29 @@ func TestStats(t *testing.T) {

	t.Run("data", func(t *testing.T) {
		const reqDomain = "domain"
		const respUpstream = "upstream"

		entries := []stats.Entry{{
		entries := []*stats.Entry{{
			Domain:   reqDomain,
			Client:   cliIPStr,
			Result:   stats.RFiltered,
			Time:     123456,
			Time:     time.Microsecond * 123456,
			Upstream: respUpstream,
		}, {
			Domain:   reqDomain,
			Client:   cliIPStr,
			Result:   stats.RNotFiltered,
			Time:     123456,
			Time:     time.Microsecond * 123456,
			Upstream: respUpstream,
		}}

		wantData := &stats.StatsResp{
			TimeUnits:             "hours",
			TopQueried:            []map[string]uint64{0: {reqDomain: 1}},
			TopClients:            []map[string]uint64{0: {cliIPStr: 2}},
			TopBlocked:            []map[string]uint64{0: {reqDomain: 1}},
			TopUpstreamsResponses: []map[string]uint64{0: {respUpstream: 2}},
			TopUpstreamsAvgTime:   []map[string]float64{0: {respUpstream: 0.123456}},
			DNSQueries: []uint64{
				0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
				0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2,

@@ -138,14 +144,16 @@ func TestStats(t *testing.T) {

		_24zeroes := [24]uint64{}
		emptyData := &stats.StatsResp{
			TimeUnits:             "hours",
			TopQueried:            []map[string]uint64{},
			TopClients:            []map[string]uint64{},
			TopBlocked:            []map[string]uint64{},
			TopUpstreamsResponses: []map[string]uint64{},
			TopUpstreamsAvgTime:   []map[string]float64{},
			DNSQueries:            _24zeroes[:],
			BlockedFiltering:      _24zeroes[:],
			ReplacedSafebrowsing:  _24zeroes[:],
			ReplacedParental:      _24zeroes[:],
		}

		req = httptest.NewRequest(http.MethodGet, "/control/stats", nil)

@@ -187,7 +195,7 @@ func TestLargeNumbers(t *testing.T) {

		for i := 0; i < cliNumPerHour; i++ {
			ip := net.IP{127, 0, byte((i & 0xff00) >> 8), byte(i & 0xff)}
			e := stats.Entry{
			e := &stats.Entry{
				Domain: fmt.Sprintf("domain%d.hour%d", i, h),
				Client: ip.String(),
				Result: stats.RNotFiltered,

@@ -11,17 +11,19 @@ import (
	"github.com/AdguardTeam/golibs/log"
	"github.com/AdguardTeam/golibs/stringutil"
	"go.etcd.io/bbolt"
	"golang.org/x/exp/maps"
	"golang.org/x/exp/slices"
)

// TODO(a.garipov): Rewrite all of this. Add proper error handling and
// inspection. Improve logging. Decrease complexity.

const (
	// maxDomains is the max number of top domains to return.
	maxDomains = 100

	// maxClients is the max number of top clients to return.
	maxClients = 100

	// maxUpstreams is the max number of top upstreams to return.
	maxUpstreams = 100
)

// UnitIDGenFunc is the signature of a function that generates a unique ID for

@@ -63,11 +65,30 @@ type Entry struct {
	// Domain is the domain name requested.
	Domain string

	// Upstream is the upstream DNS server.
	Upstream string

	// Result is the result of processing the request.
	Result Result

	// Time is the duration of the request processing in milliseconds.
	Time uint32
	// Time is the duration of the request processing.
	Time time.Duration
}

// validate returns an error if entry is not valid.
func (e *Entry) validate() (err error) {
	switch {
	case e.Result == 0:
		return errors.Error("result code is not set")
	case e.Result >= resultLast:
		return fmt.Errorf("unknown result code %d", e.Result)
	case e.Domain == "":
		return errors.Error("domain is empty")
	case e.Client == "":
		return errors.Error("client is empty")
	default:
		return nil
	}
}

// unit collects the statistics data for a specific period of time.

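To make the checks above concrete, here is a small self-contained mirror of the validation logic with simplified, hypothetical types (not the package's actual code): an entry with an unset result code, an unknown result code, or an empty domain or client is rejected before it can reach the counters.

package main

import (
	"errors"
	"fmt"
	"time"
)

// entry is a simplified stand-in for stats.Entry from this diff.
type entry struct {
	Client string
	Domain string
	Result int
	Time   time.Duration
}

// validate mirrors the switch above: the result code must be set and known,
// and both domain and client must be non-empty.
func (e *entry) validate() error {
	const resultLast = 6 // assumed number of known result codes

	switch {
	case e.Result == 0:
		return errors.New("result code is not set")
	case e.Result >= resultLast:
		return fmt.Errorf("unknown result code %d", e.Result)
	case e.Domain == "":
		return errors.New("domain is empty")
	case e.Client == "":
		return errors.New("client is empty")
	default:
		return nil
	}
}

func main() {
	ok := &entry{Client: "127.0.0.1", Domain: "example.org", Result: 1, Time: 250 * time.Millisecond}
	bad := &entry{Client: "127.0.0.1", Result: 1} // no domain

	fmt.Println(ok.validate())  // <nil>
	fmt.Println(bad.validate()) // domain is empty
}
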
@@ -82,6 +103,13 @@ type unit struct {
	// clients stores the number of requests from each client.
	clients map[string]uint64

	// upstreamsResponses stores the number of responses from each upstream.
	upstreamsResponses map[string]uint64

	// upstreamsTimeSum stores the sum of processing time in microseconds of
	// responses from each upstream.
	upstreamsTimeSum map[string]uint64

	// nResult stores the number of requests grouped by its result.
	nResult []uint64

@@ -95,7 +123,7 @@ type unit struct {
	// nTotal stores the total number of requests.
	nTotal uint64

	// timeSum stores the sum of processing time in milliseconds of each request
	// timeSum stores the sum of processing time in microseconds of each request
	// written by the unit.
	timeSum uint64
}

@@ -103,11 +131,13 @@ type unit struct {
// newUnit allocates the new *unit.
func newUnit(id uint32) (u *unit) {
	return &unit{
		domains:            map[string]uint64{},
		blockedDomains:     map[string]uint64{},
		clients:            map[string]uint64{},
		upstreamsResponses: map[string]uint64{},
		upstreamsTimeSum:   map[string]uint64{},
		nResult:            make([]uint64, resultLast),
		id:                 id,
	}
}

@@ -135,10 +165,17 @@ type unitDB struct {
	// Clients is the number of requests from each client.
	Clients []countPair

	// UpstreamsResponses is the number of responses from each upstream.
	UpstreamsResponses []countPair

	// UpstreamsTimeSum is the sum of processing time in microseconds of
	// responses from each upstream.
	UpstreamsTimeSum []countPair

	// NTotal is the total number of requests.
	NTotal uint64

	// TimeAvg is the average of processing times in milliseconds of all the
	// TimeAvg is the average of processing times in microseconds of all the
	// requests in the unit.
	TimeAvg uint32
}

@@ -218,12 +255,14 @@ func (u *unit) serialize() (udb *unitDB) {
	}

	return &unitDB{
		NTotal:             u.nTotal,
		NResult:            append([]uint64{}, u.nResult...),
		Domains:            convertMapToSlice(u.domains, maxDomains),
		BlockedDomains:     convertMapToSlice(u.blockedDomains, maxDomains),
		Clients:            convertMapToSlice(u.clients, maxClients),
		UpstreamsResponses: convertMapToSlice(u.upstreamsResponses, maxUpstreams),
		UpstreamsTimeSum:   convertMapToSlice(u.upstreamsTimeSum, maxUpstreams),
		TimeAvg:            timeAvg,
	}
}

@@ -262,21 +301,29 @@ func (u *unit) deserialize(udb *unitDB) {
	u.domains = convertSliceToMap(udb.Domains)
	u.blockedDomains = convertSliceToMap(udb.BlockedDomains)
	u.clients = convertSliceToMap(udb.Clients)
	u.upstreamsResponses = convertSliceToMap(udb.UpstreamsResponses)
	u.upstreamsTimeSum = convertSliceToMap(udb.UpstreamsTimeSum)
	u.timeSum = uint64(udb.TimeAvg) * udb.NTotal
}

// add adds new data to u. It's safe for concurrent use.
func (u *unit) add(res Result, domain, cli string, dur uint64) {
	u.nResult[res]++
	if res == RNotFiltered {
		u.domains[domain]++
func (u *unit) add(e *Entry) {
	u.nResult[e.Result]++
	if e.Result == RNotFiltered {
		u.domains[e.Domain]++
	} else {
		u.blockedDomains[domain]++
		u.blockedDomains[e.Domain]++
	}

	u.clients[cli]++
	u.timeSum += dur
	u.clients[e.Client]++
	t := uint64(e.Time.Microseconds())
	u.timeSum += t
	u.nTotal++

	if e.Upstream != "" {
		u.upstreamsResponses[e.Upstream]++
		u.upstreamsTimeSum[e.Upstream] += t
	}
}

|
||||
|
@ -390,9 +437,11 @@ func (s *StatsCtx) getData(limit uint32) (StatsResp, bool) {
|
|||
return StatsResp{
|
||||
TimeUnits: "days",
|
||||
|
||||
TopBlocked: []topAddrs{},
|
||||
TopClients: []topAddrs{},
|
||||
TopQueried: []topAddrs{},
|
||||
TopBlocked: []topAddrs{},
|
||||
TopClients: []topAddrs{},
|
||||
TopQueried: []topAddrs{},
|
||||
TopUpstreamsResponses: []topAddrs{},
|
||||
TopUpstreamsAvgTime: []topAddrsFloat{},
|
||||
|
||||
BlockedFiltering: []uint64{},
|
||||
DNSQueries: []uint64{},
|
||||
|
@ -416,21 +465,35 @@ func (s *StatsCtx) getData(limit uint32) (StatsResp, bool) {
|
|||
log.Fatalf("len(dnsQueries) != limit: %d %d", len(dnsQueries), limit)
|
||||
}
|
||||
|
||||
return s.dataFromUnits(units, dnsQueries, firstID, timeUnit), true
|
||||
}
|
||||
|
||||
// dataFromUnits collects and returns the statistics data.
|
||||
func (s *StatsCtx) dataFromUnits(
|
||||
units []*unitDB,
|
||||
dnsQueries []uint64,
|
||||
firstID uint32,
|
||||
timeUnit TimeUnit,
|
||||
) (resp StatsResp) {
|
||||
topUpstreamsResponses, topUpstreamsAvgTime := topUpstreamsPairs(units)
|
||||
|
||||
data := StatsResp{
|
||||
DNSQueries: dnsQueries,
|
||||
BlockedFiltering: statsCollector(units, firstID, timeUnit, func(u *unitDB) (num uint64) { return u.NResult[RFiltered] }),
|
||||
ReplacedSafebrowsing: statsCollector(units, firstID, timeUnit, func(u *unitDB) (num uint64) { return u.NResult[RSafeBrowsing] }),
|
||||
ReplacedParental: statsCollector(units, firstID, timeUnit, func(u *unitDB) (num uint64) { return u.NResult[RParental] }),
|
||||
TopQueried: topsCollector(units, maxDomains, s.ignored, func(u *unitDB) (pairs []countPair) { return u.Domains }),
|
||||
TopBlocked: topsCollector(units, maxDomains, s.ignored, func(u *unitDB) (pairs []countPair) { return u.BlockedDomains }),
|
||||
TopClients: topsCollector(units, maxClients, nil, topClientPairs(s)),
|
||||
DNSQueries: dnsQueries,
|
||||
BlockedFiltering: statsCollector(units, firstID, timeUnit, func(u *unitDB) (num uint64) { return u.NResult[RFiltered] }),
|
||||
ReplacedSafebrowsing: statsCollector(units, firstID, timeUnit, func(u *unitDB) (num uint64) { return u.NResult[RSafeBrowsing] }),
|
||||
ReplacedParental: statsCollector(units, firstID, timeUnit, func(u *unitDB) (num uint64) { return u.NResult[RParental] }),
|
||||
TopQueried: topsCollector(units, maxDomains, s.ignored, func(u *unitDB) (pairs []countPair) { return u.Domains }),
|
||||
TopBlocked: topsCollector(units, maxDomains, s.ignored, func(u *unitDB) (pairs []countPair) { return u.BlockedDomains }),
|
||||
TopUpstreamsResponses: topUpstreamsResponses,
|
||||
TopUpstreamsAvgTime: topUpstreamsAvgTime,
|
||||
TopClients: topsCollector(units, maxClients, nil, topClientPairs(s)),
|
||||
}
|
||||
|
||||
// Total counters:
|
||||
sum := unitDB{
|
||||
NResult: make([]uint64, resultLast),
|
||||
}
|
||||
timeN := 0
|
||||
var timeN uint32
|
||||
for _, u := range units {
|
||||
sum.NTotal += u.NTotal
|
||||
sum.TimeAvg += u.TimeAvg
|
||||
|
@ -450,7 +513,7 @@ func (s *StatsCtx) getData(limit uint32) (StatsResp, bool) {
|
|||
data.NumReplacedParental = sum.NResult[RParental]
|
||||
|
||||
if timeN != 0 {
|
||||
data.AvgProcessingTime = float64(sum.TimeAvg/uint32(timeN)) / 1000000
|
||||
data.AvgProcessingTime = microsecondsToSeconds(float64(sum.TimeAvg / timeN))
|
||||
}
|
||||
|
||||
data.TimeUnits = "hours"
|
||||
|
@ -458,7 +521,7 @@ func (s *StatsCtx) getData(limit uint32) (StatsResp, bool) {
|
|||
data.TimeUnits = "days"
|
||||
}
|
||||
|
||||
return data, true
|
||||
return data
|
||||
}
|
||||
|
||||
func topClientPairs(s *StatsCtx) (pg pairsGetter) {
|
||||
|
@ -474,3 +537,66 @@ func topClientPairs(s *StatsCtx) (pg pairsGetter) {
|
|||
return clients
|
||||
}
|
||||
}
|
||||
|
||||
// topUpstreamsPairs returns sorted lists of number of total responses and the
|
||||
// average of processing time for each upstream.
|
||||
func topUpstreamsPairs(
|
||||
units []*unitDB,
|
||||
) (topUpstreamsResponses []topAddrs, topUpstreamsAvgTime []topAddrsFloat) {
|
||||
upstreamsResponses := topAddrs{}
|
||||
upstreamsTimeSum := topAddrsFloat{}
|
||||
|
||||
for _, u := range units {
|
||||
for _, cp := range u.UpstreamsResponses {
|
||||
upstreamsResponses[cp.Name] += cp.Count
|
||||
}
|
||||
|
||||
for _, cp := range u.UpstreamsTimeSum {
|
||||
upstreamsTimeSum[cp.Name] += float64(cp.Count)
|
||||
}
|
||||
}
|
||||
|
||||
upstreamsAvgTime := topAddrsFloat{}
|
||||
|
||||
for u, n := range upstreamsResponses {
|
||||
total := upstreamsTimeSum[u]
|
||||
|
||||
if total != 0 {
|
||||
upstreamsAvgTime[u] = microsecondsToSeconds(total / float64(n))
|
||||
}
|
||||
}
|
||||
|
||||
upstreamsPairs := convertMapToSlice(upstreamsResponses, maxUpstreams)
|
||||
topUpstreamsResponses = convertTopSlice(upstreamsPairs)
|
||||
|
||||
return topUpstreamsResponses, prepareTopUpstreamsAvgTime(upstreamsAvgTime)
|
||||
}
|
||||
|
||||
// microsecondsToSeconds converts microseconds to seconds.
|
||||
//
|
||||
// NOTE: Frontend expects time duration in seconds as floating-point number
|
||||
// with double precision.
|
||||
func microsecondsToSeconds(n float64) (r float64) {
|
||||
const micro = 1e-6
|
||||
|
||||
return n * micro
|
||||
}
|
||||
|
||||
// prepareTopUpstreamsAvgTime returns sorted list of average processing times
|
||||
// of the DNS requests from each upstream.
|
||||
func prepareTopUpstreamsAvgTime(
|
||||
upstreamsAvgTime topAddrsFloat,
|
||||
) (topUpstreamsAvgTime []topAddrsFloat) {
|
||||
keys := maps.Keys(upstreamsAvgTime)
|
||||
|
||||
slices.SortFunc(keys, func(a, b string) (sortsBefore bool) {
|
||||
return upstreamsAvgTime[a] > upstreamsAvgTime[b]
|
||||
})
|
||||
|
||||
topUpstreamsAvgTime = make([]topAddrsFloat, 0, len(upstreamsAvgTime))
|
||||
for _, k := range keys {
|
||||
topUpstreamsAvgTime = append(topUpstreamsAvgTime, topAddrsFloat{k: upstreamsAvgTime[k]})
|
||||
}
|
||||
|
||||
return topUpstreamsAvgTime
|
||||
}
|
||||
|
|
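A short standalone sketch (not the package's code) of the arithmetic performed by `topUpstreamsPairs` and `microsecondsToSeconds`: the average per upstream is its microsecond time sum divided by its response count, converted to seconds. With the numbers from the "sorted" test case in unit_internal_test.go below, 800_000_000 µs over 8 responses averages to 100 s, which is why 4.4.4.4 (1000 s) sorts ahead of 3.3.3.3 (100 s).

package main

import "fmt"

func main() {
	// Response counts and microsecond sums per upstream, matching the
	// "sorted" test case in unit_internal_test.go.
	responses := map[string]uint64{"3.3.3.3": 8, "2.2.2.2": 4, "4.4.4.4": 16, "1.1.1.1": 2}
	timeSums := map[string]float64{
		"3.3.3.3": 800_000_000,
		"2.2.2.2": 40_000_000,
		"4.4.4.4": 16_000_000_000,
		"1.1.1.1": 2_000_000,
	}

	const micro = 1e-6 // same conversion as microsecondsToSeconds

	for u, n := range responses {
		avgSeconds := timeSums[u] / float64(n) * micro
		fmt.Printf("%s: %.0f s\n", u, avgSeconds)
	}
	// 4.4.4.4: 1000 s, 3.3.3.3: 100 s, 2.2.2.2: 10 s, 1.1.1.1: 1 s
	// (map iteration order is random; prepareTopUpstreamsAvgTime sorts
	// the result in descending order of the average).
}
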
177 internal/stats/unit_internal_test.go Normal file

@@ -0,0 +1,177 @@
package stats

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestUnit_Deserialize(t *testing.T) {
	testCases := []struct {
		db   *unitDB
		name string
		want unit
	}{{
		name: "empty",
		want: unit{
			domains:            map[string]uint64{},
			blockedDomains:     map[string]uint64{},
			clients:            map[string]uint64{},
			nResult:            []uint64{0, 0, 0, 0, 0, 0},
			id:                 0,
			nTotal:             0,
			timeSum:            0,
			upstreamsResponses: map[string]uint64{},
			upstreamsTimeSum:   map[string]uint64{},
		},
		db: &unitDB{
			NResult:            []uint64{0, 0, 0, 0, 0, 0},
			Domains:            []countPair{},
			BlockedDomains:     []countPair{},
			Clients:            []countPair{},
			NTotal:             0,
			TimeAvg:            0,
			UpstreamsResponses: []countPair{},
			UpstreamsTimeSum:   []countPair{},
		},
	}, {
		name: "basic",
		want: unit{
			domains: map[string]uint64{
				"example.com": 1,
			},
			blockedDomains: map[string]uint64{
				"example.net": 1,
			},
			clients: map[string]uint64{
				"127.0.0.1": 2,
			},
			nResult: []uint64{0, 1, 1, 0, 0, 0},
			id:      0,
			nTotal:  2,
			timeSum: 246912,
			upstreamsResponses: map[string]uint64{
				"1.2.3.4": 2,
			},
			upstreamsTimeSum: map[string]uint64{
				"1.2.3.4": 246912,
			},
		},
		db: &unitDB{
			NResult: []uint64{0, 1, 1, 0, 0, 0},
			Domains: []countPair{{
				"example.com", 1,
			}},
			BlockedDomains: []countPair{{
				"example.net", 1,
			}},
			Clients: []countPair{{
				"127.0.0.1", 2,
			}},
			NTotal:  2,
			TimeAvg: 123456,
			UpstreamsResponses: []countPair{{
				"1.2.3.4", 2,
			}},
			UpstreamsTimeSum: []countPair{{
				"1.2.3.4", 246912,
			}},
		},
	}}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			got := unit{}
			got.deserialize(tc.db)
			require.Equal(t, tc.want, got)
		})
	}
}

func TestTopUpstreamsPairs(t *testing.T) {
	testCases := []struct {
		db            *unitDB
		name          string
		wantResponses []topAddrs
		wantAvgTime   []topAddrsFloat
	}{{
		name: "empty",
		db: &unitDB{
			NResult:            []uint64{0, 0, 0, 0, 0, 0},
			Domains:            []countPair{},
			BlockedDomains:     []countPair{},
			Clients:            []countPair{},
			NTotal:             0,
			TimeAvg:            0,
			UpstreamsResponses: []countPair{},
			UpstreamsTimeSum:   []countPair{},
		},
		wantResponses: []topAddrs{},
		wantAvgTime:   []topAddrsFloat{},
	}, {
		name: "basic",
		db: &unitDB{
			NResult:        []uint64{0, 0, 0, 0, 0, 0},
			Domains:        []countPair{},
			BlockedDomains: []countPair{},
			Clients:        []countPair{},
			NTotal:         0,
			TimeAvg:        0,
			UpstreamsResponses: []countPair{{
				"1.2.3.4", 2,
			}},
			UpstreamsTimeSum: []countPair{{
				"1.2.3.4", 246912,
			}},
		},
		wantResponses: []topAddrs{{
			"1.2.3.4": 2,
		}},
		wantAvgTime: []topAddrsFloat{{
			"1.2.3.4": 0.123456,
		}},
	}, {
		name: "sorted",
		db: &unitDB{
			NResult:        []uint64{0, 0, 0, 0, 0, 0},
			Domains:        []countPair{},
			BlockedDomains: []countPair{},
			Clients:        []countPair{},
			NTotal:         0,
			TimeAvg:        0,
			UpstreamsResponses: []countPair{
				{"3.3.3.3", 8},
				{"2.2.2.2", 4},
				{"4.4.4.4", 16},
				{"1.1.1.1", 2},
			},
			UpstreamsTimeSum: []countPair{
				{"3.3.3.3", 800_000_000},
				{"2.2.2.2", 40_000_000},
				{"4.4.4.4", 16_000_000_000},
				{"1.1.1.1", 2_000_000},
			},
		},
		wantResponses: []topAddrs{
			{"4.4.4.4": 16},
			{"3.3.3.3": 8},
			{"2.2.2.2": 4},
			{"1.1.1.1": 2},
		},
		wantAvgTime: []topAddrsFloat{
			{"4.4.4.4": 1000},
			{"3.3.3.3": 100},
			{"2.2.2.2": 10},
			{"1.1.1.1": 1},
		},
	}}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			gotResponses, gotAvgTime := topUpstreamsPairs([]*unitDB{tc.db})
			assert.Equal(t, tc.wantResponses, gotResponses)
			assert.Equal(t, tc.wantAvgTime, gotAvgTime)
		})
	}
}

@@ -4,6 +4,16 @@

## v0.108.0: API changes

## v0.107.36: API changes

### The new fields `"top_upstreams_responses"` and `"top_upstreams_avg_time"` in `Stats` object

* The new field `"top_upstreams_responses"` in `GET /control/stats` method
  shows the total number of responses from each upstream.

* The new field `"top_upstreams_avg_time"` in `GET /control/stats` method shows
  the average processing time in seconds of requests from each upstream.

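A minimal client-side sketch of reading these two arrays (not part of this PR; the payload values and the upstream address are made up, and all other response fields are ignored here):

package main

import (
	"encoding/json"
	"fmt"
)

// statsFragment decodes only the two new fields of the /control/stats
// response.
type statsFragment struct {
	TopUpstreamsResponses []map[string]uint64  `json:"top_upstreams_responses"`
	TopUpstreamsAvgTime   []map[string]float64 `json:"top_upstreams_avg_time"`
}

func main() {
	// Hypothetical response fragment; real responses contain more fields.
	payload := []byte(`{
		"top_upstreams_responses": [{"1.2.3.4:53": 2}],
		"top_upstreams_avg_time": [{"1.2.3.4:53": 0.123456}]
	}`)

	var frag statsFragment
	if err := json.Unmarshal(payload, &frag); err != nil {
		panic(err)
	}

	// The average time is reported in seconds: 0.123456 s is 123.456 ms.
	fmt.Println(frag.TopUpstreamsResponses, frag.TopUpstreamsAvgTime)
}
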
## v0.107.30: API changes

### `POST /control/version.json` and `GET /control/dhcp/interfaces` content type

@@ -1728,7 +1728,7 @@
        'avg_processing_time':
          'type': 'number'
          'format': 'float'
          'description': 'Average time in milliseconds on processing a DNS'
          'description': 'Average time in seconds on processing a DNS request'
          'example': 0.34
        'top_queried_domains':
          'type': 'array'

@@ -1742,6 +1742,19 @@
          'type': 'array'
          'items':
            '$ref': '#/components/schemas/TopArrayEntry'
        'top_upstreams_responses':
          'type': 'array'
          'description': 'Total number of responses from each upstream.'
          'items':
            '$ref': '#/components/schemas/TopArrayEntry'
          'maxItems': 100
        'top_upstreams_avg_time':
          'type': 'array'
          'description': >
            Average processing time in seconds of requests from each upstream.
          'items':
            '$ref': '#/components/schemas/TopArrayEntry'
          'maxItems': 100
        'dns_queries':
          'type': 'array'
          'items':

@@ -1761,12 +1774,13 @@
    'TopArrayEntry':
      'type': 'object'
      'description': >
        Represent the number of hits per key (domain or client IP).
        Represent the number of hits or time duration per key (url, domain, or
        client IP).
      'properties':
        'domain_or_ip':
          'type': 'integer'
          'type': 'number'
      'additionalProperties':
        'type': 'integer'
        'type': 'number'
    'StatsConfig':
      'type': 'object'
      'description': 'Statistics configuration'

@@ -184,6 +184,7 @@ run_linter gocognit --over 10\
	./internal/next/\
	./internal/rdns/\
	./internal/schedule/\
	./internal/stats/\
	./internal/tools/\
	./internal/version/\
	./internal/whois/\

@@ -196,7 +197,6 @@ run_linter gocognit --over 19 ./internal/dnsforward/ ./internal/home/
run_linter gocognit --over 18 ./internal/aghtls/
run_linter gocognit --over 17 ./internal/filtering ./internal/filtering/rewrite/
run_linter gocognit --over 15 ./internal/aghos/ ./internal/dhcpd/
run_linter gocognit --over 14 ./internal/stats/
run_linter gocognit --over 12 ./internal/updater/
run_linter gocognit --over 11 ./internal/aghtest/