Merge pull request #14039 from Chocobo1/stats

Don't use removed stat metric in libtorrent 2.0
Mike Tzou 2020-12-25 17:55:56 +08:00 committed by GitHub
commit e1073de36f
6 changed files with 77 additions and 101 deletions
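Background on the change: libtorrent 2.0 removed its internal disk cache, and with it the "disk.num_blocks_cache_hits" session counter that qBittorrent used for the "Read cache hits" statistic, so the diffs below compile that handling only when built against libtorrent < 2.0. A minimal standalone sketch of the guard (illustrative only, not part of this commit; the file name is made up and the lt namespace alias is redeclared for clarity):

// sketch_version_guard.cpp — illustrative only
#include <libtorrent/version.hpp>
#include <libtorrent/session_stats.hpp>
#include <cstdio>

namespace lt = libtorrent; // alias also provided by libtorrent's own headers

int main()
{
#if (LIBTORRENT_VERSION_NUM < 20000)
    // libtorrent 1.2.x: the counter still exists, so the lookup succeeds
    std::printf("cache-hit metric index: %d\n", lt::find_metric_idx("disk.num_blocks_cache_hits"));
#else
    // libtorrent >= 2.0: the counter was removed together with the disk cache
    std::printf("built against libtorrent >= 2.0; metric no longer exists\n");
#endif
    return 0;
}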


@@ -38,6 +38,6 @@ namespace BitTorrent
         quint64 jobQueueLength = 0;
         quint64 averageJobTime = 0;
         quint64 queuedBytes = 0;
-        qreal readRatio = 0.0;
+        qreal readRatio = 0; // TODO: remove when LIBTORRENT_VERSION_NUM >= 20000
     };
 }


@@ -1163,80 +1163,44 @@ void Session::applyBandwidthLimits(lt::settings_pack &settingsPack) const
 void Session::initMetrics()
 {
-    m_metricIndices.net.hasIncomingConnections = lt::find_metric_idx("net.has_incoming_connections");
-    Q_ASSERT(m_metricIndices.net.hasIncomingConnections >= 0);
+    const auto findMetricIndex = [](const char *name) -> int
+    {
+        const int index = lt::find_metric_idx(name);
+        Q_ASSERT(index >= 0);
+        return index;
+    };
-    m_metricIndices.net.sentPayloadBytes = lt::find_metric_idx("net.sent_payload_bytes");
-    Q_ASSERT(m_metricIndices.net.sentPayloadBytes >= 0);
+    // TODO: switch to "designated initializers" in C++20
+    m_metricIndices.net.hasIncomingConnections = findMetricIndex("net.has_incoming_connections");
+    m_metricIndices.net.sentPayloadBytes = findMetricIndex("net.sent_payload_bytes");
+    m_metricIndices.net.recvPayloadBytes = findMetricIndex("net.recv_payload_bytes");
+    m_metricIndices.net.sentBytes = findMetricIndex("net.sent_bytes");
+    m_metricIndices.net.recvBytes = findMetricIndex("net.recv_bytes");
+    m_metricIndices.net.sentIPOverheadBytes = findMetricIndex("net.sent_ip_overhead_bytes");
+    m_metricIndices.net.recvIPOverheadBytes = findMetricIndex("net.recv_ip_overhead_bytes");
+    m_metricIndices.net.sentTrackerBytes = findMetricIndex("net.sent_tracker_bytes");
+    m_metricIndices.net.recvTrackerBytes = findMetricIndex("net.recv_tracker_bytes");
+    m_metricIndices.net.recvRedundantBytes = findMetricIndex("net.recv_redundant_bytes");
+    m_metricIndices.net.recvFailedBytes = findMetricIndex("net.recv_failed_bytes");
-    m_metricIndices.net.recvPayloadBytes = lt::find_metric_idx("net.recv_payload_bytes");
-    Q_ASSERT(m_metricIndices.net.recvPayloadBytes >= 0);
+    m_metricIndices.peer.numPeersConnected = findMetricIndex("peer.num_peers_connected");
+    m_metricIndices.peer.numPeersDownDisk = findMetricIndex("peer.num_peers_down_disk");
+    m_metricIndices.peer.numPeersUpDisk = findMetricIndex("peer.num_peers_up_disk");
-    m_metricIndices.net.sentBytes = lt::find_metric_idx("net.sent_bytes");
-    Q_ASSERT(m_metricIndices.net.sentBytes >= 0);
+    m_metricIndices.dht.dhtBytesIn = findMetricIndex("dht.dht_bytes_in");
+    m_metricIndices.dht.dhtBytesOut = findMetricIndex("dht.dht_bytes_out");
+    m_metricIndices.dht.dhtNodes = findMetricIndex("dht.dht_nodes");
-    m_metricIndices.net.recvBytes = lt::find_metric_idx("net.recv_bytes");
-    Q_ASSERT(m_metricIndices.net.recvBytes >= 0);
-    m_metricIndices.net.sentIPOverheadBytes = lt::find_metric_idx("net.sent_ip_overhead_bytes");
-    Q_ASSERT(m_metricIndices.net.sentIPOverheadBytes >= 0);
-    m_metricIndices.net.recvIPOverheadBytes = lt::find_metric_idx("net.recv_ip_overhead_bytes");
-    Q_ASSERT(m_metricIndices.net.recvIPOverheadBytes >= 0);
-    m_metricIndices.net.sentTrackerBytes = lt::find_metric_idx("net.sent_tracker_bytes");
-    Q_ASSERT(m_metricIndices.net.sentTrackerBytes >= 0);
-    m_metricIndices.net.recvTrackerBytes = lt::find_metric_idx("net.recv_tracker_bytes");
-    Q_ASSERT(m_metricIndices.net.recvTrackerBytes >= 0);
-    m_metricIndices.net.recvRedundantBytes = lt::find_metric_idx("net.recv_redundant_bytes");
-    Q_ASSERT(m_metricIndices.net.recvRedundantBytes >= 0);
-    m_metricIndices.net.recvFailedBytes = lt::find_metric_idx("net.recv_failed_bytes");
-    Q_ASSERT(m_metricIndices.net.recvFailedBytes >= 0);
-    m_metricIndices.peer.numPeersConnected = lt::find_metric_idx("peer.num_peers_connected");
-    Q_ASSERT(m_metricIndices.peer.numPeersConnected >= 0);
-    m_metricIndices.peer.numPeersDownDisk = lt::find_metric_idx("peer.num_peers_down_disk");
-    Q_ASSERT(m_metricIndices.peer.numPeersDownDisk >= 0);
-    m_metricIndices.peer.numPeersUpDisk = lt::find_metric_idx("peer.num_peers_up_disk");
-    Q_ASSERT(m_metricIndices.peer.numPeersUpDisk >= 0);
-    m_metricIndices.dht.dhtBytesIn = lt::find_metric_idx("dht.dht_bytes_in");
-    Q_ASSERT(m_metricIndices.dht.dhtBytesIn >= 0);
-    m_metricIndices.dht.dhtBytesOut = lt::find_metric_idx("dht.dht_bytes_out");
-    Q_ASSERT(m_metricIndices.dht.dhtBytesOut >= 0);
-    m_metricIndices.dht.dhtNodes = lt::find_metric_idx("dht.dht_nodes");
-    Q_ASSERT(m_metricIndices.dht.dhtNodes >= 0);
-    m_metricIndices.disk.diskBlocksInUse = lt::find_metric_idx("disk.disk_blocks_in_use");
-    Q_ASSERT(m_metricIndices.disk.diskBlocksInUse >= 0);
-    m_metricIndices.disk.numBlocksRead = lt::find_metric_idx("disk.num_blocks_read");
-    Q_ASSERT(m_metricIndices.disk.numBlocksRead >= 0);
-    m_metricIndices.disk.numBlocksCacheHits = lt::find_metric_idx("disk.num_blocks_cache_hits");
-    Q_ASSERT(m_metricIndices.disk.numBlocksCacheHits >= 0);
-    m_metricIndices.disk.writeJobs = lt::find_metric_idx("disk.num_write_ops");
-    Q_ASSERT(m_metricIndices.disk.writeJobs >= 0);
-    m_metricIndices.disk.readJobs = lt::find_metric_idx("disk.num_read_ops");
-    Q_ASSERT(m_metricIndices.disk.readJobs >= 0);
-    m_metricIndices.disk.hashJobs = lt::find_metric_idx("disk.num_blocks_hashed");
-    Q_ASSERT(m_metricIndices.disk.hashJobs >= 0);
-    m_metricIndices.disk.queuedDiskJobs = lt::find_metric_idx("disk.queued_disk_jobs");
-    Q_ASSERT(m_metricIndices.disk.queuedDiskJobs >= 0);
-    m_metricIndices.disk.diskJobTime = lt::find_metric_idx("disk.disk_job_time");
-    Q_ASSERT(m_metricIndices.disk.diskJobTime >= 0);
+    m_metricIndices.disk.diskBlocksInUse = findMetricIndex("disk.disk_blocks_in_use");
+    m_metricIndices.disk.numBlocksRead = findMetricIndex("disk.num_blocks_read");
+#if (LIBTORRENT_VERSION_NUM < 20000)
+    m_metricIndices.disk.numBlocksCacheHits = findMetricIndex("disk.num_blocks_cache_hits");
+#endif
+    m_metricIndices.disk.writeJobs = findMetricIndex("disk.num_write_ops");
+    m_metricIndices.disk.readJobs = findMetricIndex("disk.num_read_ops");
+    m_metricIndices.disk.hashJobs = findMetricIndex("disk.num_blocks_hashed");
+    m_metricIndices.disk.queuedDiskJobs = findMetricIndex("disk.queued_disk_jobs");
+    m_metricIndices.disk.diskJobTime = findMetricIndex("disk.disk_job_time");
 }
 void Session::loadLTSettings(lt::settings_pack &settingsPack)
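Note on the hunk above: each find_metric_idx/Q_ASSERT pair is folded into a single findMetricIndex lambda call. A self-contained sketch of the same lookup-and-assert pattern outside Qt (illustrative only; assert stands in for Q_ASSERT, the file name and metric name are just examples):

// sketch_find_metric.cpp — illustrative only
#include <libtorrent/session_stats.hpp>
#include <cassert>
#include <cstdio>

namespace lt = libtorrent;

int main()
{
    // Look up a counter index once and assert that the name is known;
    // lt::find_metric_idx() returns -1 for unknown (e.g. removed) names.
    const auto findMetricIndex = [](const char *name) -> int
    {
        const int index = lt::find_metric_idx(name);
        assert(index >= 0);
        return index;
    };

    const int sentBytes = findMetricIndex("net.sent_bytes");
    std::printf("net.sent_bytes index: %d\n", sentBytes);
    return 0;
}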
@@ -4908,11 +4872,14 @@ void Session::handleSessionStatsAlert(const lt::session_stats_alert *p)
     m_status.peersCount = stats[m_metricIndices.peer.numPeersConnected];
     const int64_t numBlocksRead = stats[m_metricIndices.disk.numBlocksRead];
-    const int64_t numBlocksCacheHits = stats[m_metricIndices.disk.numBlocksCacheHits];
     m_cacheStatus.totalUsedBuffers = stats[m_metricIndices.disk.diskBlocksInUse];
-    m_cacheStatus.readRatio = static_cast<qreal>(numBlocksCacheHits) / std::max<int64_t>(numBlocksCacheHits + numBlocksRead, 1);
     m_cacheStatus.jobQueueLength = stats[m_metricIndices.disk.queuedDiskJobs];
+#if (LIBTORRENT_VERSION_NUM < 20000)
+    const int64_t numBlocksCacheHits = stats[m_metricIndices.disk.numBlocksCacheHits];
+    m_cacheStatus.readRatio = static_cast<qreal>(numBlocksCacheHits) / std::max<int64_t>((numBlocksCacheHits + numBlocksRead), 1);
+#endif
     const int64_t totalJobs = stats[m_metricIndices.disk.writeJobs] + stats[m_metricIndices.disk.readJobs]
         + stats[m_metricIndices.disk.hashJobs];
     m_cacheStatus.averageJobTime = (totalJobs > 0)
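The cache-hit ratio kept for libtorrent < 2.0 is hits / (hits + reads), with the denominator clamped to at least 1 so a session that has not read any blocks yet yields 0 instead of dividing by zero. A small standalone sketch of that arithmetic (illustrative names and file name, not the project's):

// sketch_read_ratio.cpp — illustrative only
#include <algorithm>
#include <cstdint>
#include <cstdio>

double readRatio(std::int64_t cacheHits, std::int64_t blocksRead)
{
    // std::max keeps the denominator non-zero when both counters are still 0
    return static_cast<double>(cacheHits) / std::max<std::int64_t>(cacheHits + blocksRead, 1);
}

int main()
{
    std::printf("%.2f %.2f\n", readRatio(0, 0), readRatio(75, 25)); // prints 0.00 0.75
    return 0;
}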


@@ -166,43 +166,45 @@ namespace BitTorrent
     {
         struct
         {
-            int hasIncomingConnections = 0;
-            int sentPayloadBytes = 0;
-            int recvPayloadBytes = 0;
-            int sentBytes = 0;
-            int recvBytes = 0;
-            int sentIPOverheadBytes = 0;
-            int recvIPOverheadBytes = 0;
-            int sentTrackerBytes = 0;
-            int recvTrackerBytes = 0;
-            int recvRedundantBytes = 0;
-            int recvFailedBytes = 0;
+            int hasIncomingConnections = -1;
+            int sentPayloadBytes = -1;
+            int recvPayloadBytes = -1;
+            int sentBytes = -1;
+            int recvBytes = -1;
+            int sentIPOverheadBytes = -1;
+            int recvIPOverheadBytes = -1;
+            int sentTrackerBytes = -1;
+            int recvTrackerBytes = -1;
+            int recvRedundantBytes = -1;
+            int recvFailedBytes = -1;
         } net;
         struct
         {
-            int numPeersConnected = 0;
-            int numPeersUpDisk = 0;
-            int numPeersDownDisk = 0;
+            int numPeersConnected = -1;
+            int numPeersUpDisk = -1;
+            int numPeersDownDisk = -1;
        } peer;
         struct
         {
-            int dhtBytesIn = 0;
-            int dhtBytesOut = 0;
-            int dhtNodes = 0;
+            int dhtBytesIn = -1;
+            int dhtBytesOut = -1;
+            int dhtNodes = -1;
         } dht;
         struct
         {
-            int diskBlocksInUse = 0;
-            int numBlocksRead = 0;
-            int numBlocksCacheHits = 0;
-            int writeJobs = 0;
-            int readJobs = 0;
-            int hashJobs = 0;
-            int queuedDiskJobs = 0;
-            int diskJobTime = 0;
+            int diskBlocksInUse = -1;
+            int numBlocksRead = -1;
+#if (LIBTORRENT_VERSION_NUM < 20000)
+            int numBlocksCacheHits = -1;
+#endif
+            int writeJobs = -1;
+            int readJobs = -1;
+            int hashJobs = -1;
+            int queuedDiskJobs = -1;
+            int diskJobTime = -1;
         } disk;
     };
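The index defaults above change from 0 to -1. Index 0 is a valid counter, whereas -1 matches what lt::find_metric_idx() returns for an unknown name, so an index that is never initialised (or that the guarded code skips under libtorrent >= 2.0) cannot silently alias a real counter. A quick standalone check of that return value (illustrative only; the unknown name and file name are made up):

// sketch_sentinel.cpp — illustrative only
#include <libtorrent/session_stats.hpp>
#include <cstdio>

namespace lt = libtorrent;

int main()
{
    const int unknown = lt::find_metric_idx("no.such_counter"); // -1 for unknown names
    const int known = lt::find_metric_idx("net.sent_bytes");    // >= 0 for real counters
    std::printf("unknown=%d known=%d\n", unknown, known);
    return 0;
}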


@@ -52,6 +52,11 @@ StatsDialog::StatsDialog(QWidget *parent)
     connect(BitTorrent::Session::instance(), &BitTorrent::Session::statsUpdated
             , this, &StatsDialog::update);
+#if (LIBTORRENT_VERSION_NUM >= 20000)
+    m_ui->labelCacheHitsText->hide();
+    m_ui->labelCacheHits->hide();
+#endif
     Utils::Gui::resize(this);
     show();
 }
@@ -78,11 +83,13 @@ void StatsDialog::update()
         ((atd > 0) && (atu > 0))
         ? Utils::String::fromDouble(static_cast<qreal>(atu) / atd, 2)
         : "-");
+#if (LIBTORRENT_VERSION_NUM < 20000)
     // Cache hits
-    qreal readRatio = cs.readRatio;
+    const qreal readRatio = cs.readRatio;
     m_ui->labelCacheHits->setText(QString::fromLatin1("%1%").arg((readRatio > 0)
         ? Utils::String::fromDouble(100 * readRatio, 2)
         : QLatin1String("0")));
+#endif
     // Buffers size
     m_ui->labelTotalBuf->setText(Utils::Misc::friendlyUnit(cs.totalUsedBuffers * 16 * 1024));
     // Disk overload (100%) equivalent


@@ -35,7 +35,7 @@ namespace Ui
     class StatsDialog;
 }
-class StatsDialog : public QDialog
+class StatsDialog final : public QDialog
 {
     Q_OBJECT

@@ -134,7 +134,7 @@ namespace
     map[KEY_TRANSFER_GLOBAL_RATIO] = ((atd > 0) && (atu > 0)) ? Utils::String::fromDouble(static_cast<qreal>(atu) / atd, 2) : "-";
     map[KEY_TRANSFER_TOTAL_PEER_CONNECTIONS] = sessionStatus.peersCount;
-    const qreal readRatio = cacheStatus.readRatio;
+    const qreal readRatio = cacheStatus.readRatio; // TODO: remove when LIBTORRENT_VERSION_NUM >= 20000
     map[KEY_TRANSFER_READ_CACHE_HITS] = (readRatio > 0) ? Utils::String::fromDouble(100 * readRatio, 2) : "0";
     map[KEY_TRANSFER_TOTAL_BUFFERS_SIZE] = cacheStatus.totalUsedBuffers * 16 * 1024;