Propagator: Fix t8.pl
The test sets OWNCLOUD_MAX_PARALLEL to 1 to disable parallelism. But since the hard maximum is computed as twice that value, the setting had no effect. So change the way we compute hardMaximumActiveJob: use the value of OWNCLOUD_MAX_PARALLEL as this hard cap directly, and derive the maximum number of transfer jobs from it, rather than the other way around.

A consequence of this change is that, when a bandwidth limit is set, we keep the default of 6 non-transfer jobs in parallel. I believe that's fine: the short jobs do not really use bandwidth, so we can keep running the same number of small jobs.
This commit is contained in:
parent ee11b5d9be
commit 112a7dba73
3 changed files with 17 additions and 20 deletions
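
Why the old computation defeated the test: with OWNCLOUD_MAX_PARALLEL=1 the hard cap still came out as 2, so two jobs could run side by side. Below is a minimal standalone sketch of the before/after arithmetic; the helper names are hypothetical and envMax stands for the parsed OWNCLOUD_MAX_PARALLEL value, this is not code from the commit itself.

    // Hypothetical demo of the limit arithmetic, before and after this commit.
    #include <cmath>
    #include <cstdio>

    // Before: the env var fed maximumActiveJob(); the hard cap doubled it.
    int oldHardMax(int envMax)     { return (envMax ? envMax : 3) * 2; }

    // After: the env var sets the hard cap; transfer jobs derive from it.
    int newHardMax(int envMax)     { return envMax ? envMax : 6; }
    int newTransferMax(int envMax) { return (int)std::ceil(newHardMax(envMax) / 2.0); }

    int main() {
        // t8.pl exports OWNCLOUD_MAX_PARALLEL=1:
        std::printf("old hard cap: %d\n", oldHardMax(1));         // 2 -> parallelism survives
        std::printf("new hard cap: %d\n", newHardMax(1));         // 1
        std::printf("new transfer cap: %d\n", newTransferMax(1)); // 1 -> parallelism disabled
        return 0;
    }

With the variable unset, the effective defaults are unchanged: a hard cap of 6 and ceil(6/2) = 3 parallel transfers, matching the previous default of 3.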
@@ -40,6 +40,7 @@
 #include <QObject>
 #include <QTimerEvent>
 #include <QDebug>
+#include <qmath.h>
 
 namespace OCC {
 
@@ -72,32 +73,27 @@ qint64 freeSpaceLimit()
 OwncloudPropagator::~OwncloudPropagator()
 {}
 
-/* The maximum number of active jobs in parallel */
-int OwncloudPropagator::maximumActiveJob()
-{
-    static int max = qgetenv("OWNCLOUD_MAX_PARALLEL").toUInt();
-    if (!max) {
-        max = 3; //default
-    }
-
+int OwncloudPropagator::maximumActiveTransferJob()
+{
     if (_downloadLimit.fetchAndAddAcquire(0) != 0 || _uploadLimit.fetchAndAddAcquire(0) != 0) {
         // disable parallelism when there is a network limit.
         return 1;
     }
-    return max;
+    return qCeil(hardMaximumActiveJob()/2.);
 }
 
+/* The maximum number of active jobs in parallel */
 int OwncloudPropagator::hardMaximumActiveJob()
 {
-    int max = maximumActiveJob();
-    return max*2;
-    // FIXME: Wondering if we should hard-limit to 1 if maximumActiveJob() is 1
-    // to support our old use case of limiting concurrency (when "automatic" bandwidth
-    // limiting is set. But this causes https://github.com/owncloud/client/issues/4081
+    static int max = qgetenv("OWNCLOUD_MAX_PARALLEL").toUInt();
+    if (!max) {
+        max = 6; //default (Qt cannot do more anyway)
+        // TODO: increase this number when using HTTP2
+    }
+    return max;
 }
 
 
 /** Updates, creates or removes a blacklist entry for the given item.
  *
  * Returns whether the error should be suppressed.
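
Note that the bandwidth-limit branch in OwncloudPropagator keeps its behavior: any nonzero up/down limit still serializes transfers, independent of the hard cap, which now stays at its default of 6 for short jobs. A minimal sketch of that branch, with plain ints standing in for the QAtomicInt limits (names assumed):

    #include <cmath>

    // Sketch of the new maximumActiveTransferJob() logic shown above.
    int transferCap(int downloadLimit, int uploadLimit, int hardMax) {
        if (downloadLimit != 0 || uploadLimit != 0)
            return 1;                          // bandwidth limited: one transfer at a time
        return (int)std::ceil(hardMax / 2.0);  // otherwise half the hard cap, rounded up
    }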
@@ -525,7 +521,7 @@ void OwncloudPropagator::scheduleNextJob()
     // Down-scaling on slow networks? https://github.com/owncloud/client/issues/3382
     // Making sure we do up/down at same time? https://github.com/owncloud/client/issues/1633
 
-    if (_activeJobList.count() < maximumActiveJob()) {
+    if (_activeJobList.count() < maximumActiveTransferJob()) {
         if (_rootJob->scheduleNextJob()) {
             QTimer::singleShot(0, this, SLOT(scheduleNextJob()));
         }
@@ -535,12 +531,12 @@ void OwncloudPropagator::scheduleNextJob()
         // one that is likely finished quickly, we can launch another one.
         // When a job finishes another one will "move up" to be one of the first 3 and then
         // be counted too.
-        for (int i = 0; i < maximumActiveJob() && i < _activeJobList.count(); i++) {
+        for (int i = 0; i < maximumActiveTransferJob() && i < _activeJobList.count(); i++) {
             if (_activeJobList.at(i)->isLikelyFinishedQuickly()) {
                 likelyFinishedQuicklyCount++;
             }
         }
-        if (_activeJobList.count() < maximumActiveJob() + likelyFinishedQuicklyCount) {
+        if (_activeJobList.count() < maximumActiveTransferJob() + likelyFinishedQuicklyCount) {
             qDebug() << "Can pump in another request! activeJobs =" << _activeJobList.count();
             if (_rootJob->scheduleNextJob()) {
                 QTimer::singleShot(0, this, SLOT(scheduleNextJob()));
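
The scheduling heuristic itself is unchanged, only rebased on the transfer cap: among the first maximumActiveTransferJob() active jobs, each one that looks likely to finish quickly grants one extra slot. A self-contained sketch of that rule (the Job type here is hypothetical):

    #include <vector>

    struct Job { bool likelyFinishedQuickly; };

    // Mirrors the loop above: quick jobs among the first `transferCap` entries
    // each free up one additional scheduling slot.
    bool canScheduleAnother(const std::vector<Job>& active, int transferCap) {
        int quick = 0;
        for (int i = 0; i < transferCap && i < (int)active.size(); ++i) {
            if (active[i].likelyFinishedQuickly)
                ++quick;
        }
        return (int)active.size() < transferCap + quick;
    }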
@@ -301,8 +301,9 @@ public:
     /** We detected that another sync is required after this one */
     bool _anotherSyncNeeded;
 
+    /* the maximum number of jobs using bandwidth (uploads or downloads, in parallel) */
+    int maximumActiveTransferJob();
     /* The maximum number of active jobs in parallel */
-    int maximumActiveJob();
     int hardMaximumActiveJob();
 
     bool isInSharedDirectory(const QString& file);
@@ -160,7 +160,7 @@ void PropagateUploadFileV1::startNextChunk()
         parallelChunkUpload = false;
     }
 
-    if (parallelChunkUpload && (propagator()->_activeJobList.count() < propagator()->maximumActiveJob())
+    if (parallelChunkUpload && (propagator()->_activeJobList.count() < propagator()->maximumActiveTransferJob())
         && _currentChunk < _chunkCount ) {
         startNextChunk();
     }
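
PropagateUploadFileV1 now draws on the same budget when deciding whether to fan out another chunk. A one-function sketch of that gate, with assumed parameter names:

    // Sketch of the condition above: start another chunk in parallel only while
    // transfer slots remain and chunks are left to upload.
    bool shouldStartNextChunk(bool parallelChunkUpload, int activeJobs,
                              int transferCap, int currentChunk, int chunkCount) {
        return parallelChunkUpload && activeJobs < transferCap
            && currentChunk < chunkCount;
    }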