mirror of
https://github.com/nextcloud/desktop.git
synced 2024-11-26 15:06:08 +03:00
Upload: Adjust timeout for final job based on size #6527
Some servers have virus scanners and the like that can delay the response of the final chunked upload assembly significantly, often breaking the current 5min (!) timeout. See owncloud/enterprise#2480 for details.
This commit is contained in:
parent
5308fc4148
commit
2638332dc6
4 changed files with 24 additions and 0 deletions
|
@ -545,6 +545,16 @@ void PropagateUploadFileCommon::commonErrorHandling(AbstractNetworkJob *job)
|
||||||
abortWithError(status, errorString);
|
abortWithError(status, errorString);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void PropagateUploadFileCommon::adjustLastJobTimeout(AbstractNetworkJob *job, quint64 fileSize)
{
    // Grow the timeout of the final MOVE/PUT with the file size: some
    // servers run virus scanners or similar on the assembled upload and can
    // take far longer than the default timeout to answer.
    // See #6527, enterprise#2480.
    const qint64 currentTimeoutMs = job->timeoutMsec();
    const qint64 perGigabyteMs = 3 * 60 * 1000;  // allow 3 minutes per gigabyte of data
    const qint64 maxTimeoutMs = 30 * 60 * 1000;  // but never more than 30 minutes
    const qint64 scaledTimeoutMs = qint64(perGigabyteMs * fileSize / 1e9);

    // Equivalent to qBound(currentTimeoutMs, scaledTimeoutMs, maxTimeoutMs)
    // in release builds, but avoids qBound's Q_ASSERT(!(max < min)), which
    // would fire in debug builds whenever the configured timeout already
    // exceeds the 30 minute cap. The job's current timeout is never lowered.
    job->setTimeout(qMax(currentTimeoutMs, qMin(scaledTimeoutMs, maxTimeoutMs)));
}
|
||||||
|
|
||||||
void PropagateUploadFileCommon::slotJobDestroyed(QObject *job)
|
void PropagateUploadFileCommon::slotJobDestroyed(QObject *job)
|
||||||
{
|
{
|
||||||
_jobs.erase(std::remove(_jobs.begin(), _jobs.end(), job), _jobs.end());
|
_jobs.erase(std::remove(_jobs.begin(), _jobs.end(), job), _jobs.end());
|
||||||
|
|
|
@ -274,6 +274,17 @@ protected:
|
||||||
*/
|
*/
|
||||||
void commonErrorHandling(AbstractNetworkJob *job);
|
void commonErrorHandling(AbstractNetworkJob *job);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Increases the timeout for the final MOVE/PUT for large files.
|
||||||
|
*
|
||||||
|
* This is an unfortunate workaround since the drawback is not being able to
|
||||||
|
* detect real disconnects in a timely manner. Shall go away when the server
|
||||||
|
* response starts coming quicker, or there is some sort of async api.
|
||||||
|
*
|
||||||
|
* See #6527, enterprise#2480
|
||||||
|
*/
|
||||||
|
static void adjustLastJobTimeout(AbstractNetworkJob *job, quint64 fileSize);
|
||||||
|
|
||||||
// Base headers that need to be sent with every chunk
|
// Base headers that need to be sent with every chunk
|
||||||
QMap<QByteArray, QByteArray> headers();
|
QMap<QByteArray, QByteArray> headers();
|
||||||
};
|
};
|
||||||
|
|
|
@ -303,6 +303,7 @@ void PropagateUploadFileNG::startNextChunk()
|
||||||
connect(job, &MoveJob::finishedSignal, this, &PropagateUploadFileNG::slotMoveJobFinished);
|
connect(job, &MoveJob::finishedSignal, this, &PropagateUploadFileNG::slotMoveJobFinished);
|
||||||
connect(job, &QObject::destroyed, this, &PropagateUploadFileCommon::slotJobDestroyed);
|
connect(job, &QObject::destroyed, this, &PropagateUploadFileCommon::slotJobDestroyed);
|
||||||
propagator()->_activeJobList.append(this);
|
propagator()->_activeJobList.append(this);
|
||||||
|
adjustLastJobTimeout(job, fileSize);
|
||||||
job->start();
|
job->start();
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
|
@ -144,6 +144,8 @@ void PropagateUploadFileV1::startNextChunk()
|
||||||
connect(job, &PUTFileJob::uploadProgress, this, &PropagateUploadFileV1::slotUploadProgress);
|
connect(job, &PUTFileJob::uploadProgress, this, &PropagateUploadFileV1::slotUploadProgress);
|
||||||
connect(job, &PUTFileJob::uploadProgress, device, &UploadDevice::slotJobUploadProgress);
|
connect(job, &PUTFileJob::uploadProgress, device, &UploadDevice::slotJobUploadProgress);
|
||||||
connect(job, &QObject::destroyed, this, &PropagateUploadFileCommon::slotJobDestroyed);
|
connect(job, &QObject::destroyed, this, &PropagateUploadFileCommon::slotJobDestroyed);
|
||||||
|
if (isFinalChunk)
|
||||||
|
adjustLastJobTimeout(job, fileSize);
|
||||||
job->start();
|
job->start();
|
||||||
propagator()->_activeJobList.append(this);
|
propagator()->_activeJobList.append(this);
|
||||||
_currentChunk++;
|
_currentChunk++;
|
||||||
|
|
Loading…
Reference in a new issue