Propagator: Fix t8.pl

The test sets OWNCLOUD_MAX_PARALLEL to 1 to disable parallelism.
But since the hard maximum number of parallel jobs was twice that value,
this did not work.
So change the way we compute hardMaximumActiveJob: use the value of
OWNCLOUD_MAX_PARALLEL directly as this hard maximum, and derive the maximum
number of transfer jobs from it, instead of the other way around.

A consequence of this change is that, when a bandwidth limit is set, we keep
the default of 6 non-transfer jobs in parallel. I believe that's fine since
the short jobs do not really use bandwidth, so we can still run the same
number of small jobs.
Olivier Goffart, 2017-02-17 09:28:20 +01:00 (committed by Olivier Goffart)
parent ee11b5d9be
commit 112a7dba73
3 changed files with 17 additions and 20 deletions
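
For illustration only (not part of this commit): a minimal sketch of how the two
limits now relate. hardMax() and transferMax() are hypothetical free functions
standing in for OwncloudPropagator::hardMaximumActiveJob() and
maximumActiveTransferJob(); the real methods additionally cache the environment
value and return 1 for transfers when a bandwidth limit is active.

// A minimal sketch, assuming QtCore only; hardMax() and transferMax() are
// hypothetical stand-ins, not the actual propagator methods.
#include <QByteArray>
#include <QDebug>
#include <QtGlobal>
#include <qmath.h>

int hardMax()
{
    // OWNCLOUD_MAX_PARALLEL is now the hard limit itself (the real method caches it).
    int max = qgetenv("OWNCLOUD_MAX_PARALLEL").toUInt();
    if (!max)
        max = 6; // default
    return max;
}

int transferMax()
{
    // The transfer-job limit is derived from the hard limit: half, rounded up.
    // (The real method also returns 1 when a bandwidth limit is configured.)
    return qCeil(hardMax() / 2.);
}

int main()
{
    qputenv("OWNCLOUD_MAX_PARALLEL", "1");  // what t8.pl sets
    qDebug() << hardMax() << transferMax(); // 1 1 -> parallelism effectively disabled

    qunsetenv("OWNCLOUD_MAX_PARALLEL");     // default behaviour
    qDebug() << hardMax() << transferMax(); // 6 3
    return 0;
}

With OWNCLOUD_MAX_PARALLEL=1 both limits collapse to 1, which is what t8.pl
relies on; before this change the hard limit would have been 2.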


@@ -40,6 +40,7 @@
 #include <QObject>
 #include <QTimerEvent>
 #include <QDebug>
+#include <qmath.h>
 
 namespace OCC {
@@ -72,32 +73,27 @@ qint64 freeSpaceLimit()
 OwncloudPropagator::~OwncloudPropagator()
 {}
 
-/* The maximum number of active jobs in parallel */
-int OwncloudPropagator::maximumActiveJob()
-{
-    static int max = qgetenv("OWNCLOUD_MAX_PARALLEL").toUInt();
-    if (!max) {
-        max = 3; //default
-    }
-
+int OwncloudPropagator::maximumActiveTransferJob()
+{
     if (_downloadLimit.fetchAndAddAcquire(0) != 0 || _uploadLimit.fetchAndAddAcquire(0) != 0) {
         // disable parallelism when there is a network limit.
         return 1;
     }
-    return max;
+    return qCeil(hardMaximumActiveJob()/2.);
 }
 
+/* The maximum number of active jobs in parallel */
 int OwncloudPropagator::hardMaximumActiveJob()
 {
-    int max = maximumActiveJob();
-    return max*2;
-    // FIXME: Wondering if we should hard-limit to 1 if maximumActiveJob() is 1
-    // to support our old use case of limiting concurrency (when "automatic" bandwidth
-    // limiting is set. But this causes https://github.com/owncloud/client/issues/4081
+    static int max = qgetenv("OWNCLOUD_MAX_PARALLEL").toUInt();
+    if (!max) {
+        max = 6; //default (Qt cannot do more anyway)
+        // TODO: increase this number when using HTTP2
+    }
+    return max;
 }
 
 /** Updates, creates or removes a blacklist entry for the given item.
  *
  * Returns whether the error should be suppressed.
@@ -525,7 +521,7 @@ void OwncloudPropagator::scheduleNextJob()
     // Down-scaling on slow networks? https://github.com/owncloud/client/issues/3382
     // Making sure we do up/down at same time? https://github.com/owncloud/client/issues/1633
 
-    if (_activeJobList.count() < maximumActiveJob()) {
+    if (_activeJobList.count() < maximumActiveTransferJob()) {
         if (_rootJob->scheduleNextJob()) {
             QTimer::singleShot(0, this, SLOT(scheduleNextJob()));
         }
@@ -535,12 +531,12 @@ void OwncloudPropagator::scheduleNextJob()
         // one that is likely finished quickly, we can launch another one.
         // When a job finishes another one will "move up" to be one of the first 3 and then
         // be counted too.
-        for (int i = 0; i < maximumActiveJob() && i < _activeJobList.count(); i++) {
+        for (int i = 0; i < maximumActiveTransferJob() && i < _activeJobList.count(); i++) {
             if (_activeJobList.at(i)->isLikelyFinishedQuickly()) {
                 likelyFinishedQuicklyCount++;
             }
         }
-        if (_activeJobList.count() < maximumActiveJob() + likelyFinishedQuicklyCount) {
+        if (_activeJobList.count() < maximumActiveTransferJob() + likelyFinishedQuicklyCount) {
             qDebug() << "Can pump in another request! activeJobs =" << _activeJobList.count();
             if (_rootJob->scheduleNextJob()) {
                 QTimer::singleShot(0, this, SLOT(scheduleNextJob()));

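For illustration only: the "likely finished quickly" heuristic used in
scheduleNextJob() above, condensed into a self-contained sketch. Job and
canStartAnotherJob() are hypothetical stand-ins; the real code works on
_activeJobList and the propagator's limits directly.

// A condensed sketch of the scheduling heuristic, assuming a hypothetical
// Job type and canStartAnotherJob() helper.
#include <QDebug>
#include <QVector>

struct Job {
    bool quick;
    bool isLikelyFinishedQuickly() const { return quick; }
};

bool canStartAnotherJob(const QVector<Job> &activeJobs, int limit)
{
    if (activeJobs.count() < limit)
        return true; // still below the transfer limit

    // Only the first `limit` jobs are inspected; each one that is likely to
    // finish quickly raises the effective cap by one.
    int likelyFinishedQuicklyCount = 0;
    for (int i = 0; i < limit && i < activeJobs.count(); ++i) {
        if (activeJobs.at(i).isLikelyFinishedQuickly())
            ++likelyFinishedQuicklyCount;
    }
    return activeJobs.count() < limit + likelyFinishedQuicklyCount;
}

int main()
{
    QVector<Job> jobs = { {false}, {true}, {false} };
    // 3 active jobs, limit 3, one "quick" job among them -> one more may start.
    qDebug() << canStartAnotherJob(jobs, 3); // true
    return 0;
}
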

@@ -301,8 +301,9 @@ public:
     /** We detected that another sync is required after this one */
     bool _anotherSyncNeeded;
 
+    /* the maximum number of jobs using bandwidth (uploads or downloads, in parallel) */
+    int maximumActiveTransferJob();
     /* The maximum number of active jobs in parallel */
-    int maximumActiveJob();
     int hardMaximumActiveJob();
 
     bool isInSharedDirectory(const QString& file);


@@ -160,7 +160,7 @@ void PropagateUploadFileV1::startNextChunk()
         parallelChunkUpload = false;
     }
 
-    if (parallelChunkUpload && (propagator()->_activeJobList.count() < propagator()->maximumActiveJob())
+    if (parallelChunkUpload && (propagator()->_activeJobList.count() < propagator()->maximumActiveTransferJob())
         && _currentChunk < _chunkCount ) {
         startNextChunk();
     }