2014-02-06 14:50:16 +04:00
|
|
|
/*
|
|
|
|
* Copyright (C) by Olivier Goffart <ogoffart@owncloud.com>
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License as published by
|
|
|
|
* the Free Software Foundation; either version 2 of the License, or
|
|
|
|
* (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful, but
|
|
|
|
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
|
|
|
|
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
|
|
* for more details.
|
|
|
|
*/
|
|
|
|
|
2014-11-11 14:16:14 +03:00
|
|
|
#include "propagateupload.h"
|
|
|
|
#include "owncloudpropagator_p.h"
|
2014-02-06 14:50:16 +04:00
|
|
|
#include "networkjobs.h"
|
|
|
|
#include "account.h"
|
|
|
|
#include "syncjournaldb.h"
|
|
|
|
#include "syncjournalfilerecord.h"
|
2014-02-13 17:02:05 +04:00
|
|
|
#include "utility.h"
|
2014-02-18 15:54:40 +04:00
|
|
|
#include "filesystem.h"
|
2014-04-29 18:47:07 +04:00
|
|
|
#include "propagatorjobs.h"
|
2014-07-25 15:30:48 +04:00
|
|
|
#include <json.h>
|
2014-02-06 14:50:16 +04:00
|
|
|
#include <QNetworkAccessManager>
|
2014-02-17 16:48:56 +04:00
|
|
|
#include <QFileInfo>
|
2014-05-23 20:55:44 +04:00
|
|
|
#include <QDir>
|
2014-02-13 17:02:05 +04:00
|
|
|
#include <cmath>
|
2015-01-14 14:48:38 +03:00
|
|
|
#include <cstring>
|
2014-02-06 14:50:16 +04:00
|
|
|
|
2014-12-02 16:20:13 +03:00
|
|
|
namespace OCC {
|
2014-02-06 14:50:16 +04:00
|
|
|
|
2014-09-17 15:35:54 +04:00
|
|
|
/**
|
|
|
|
* The mtime of a file must be at least this many milliseconds in
|
|
|
|
* the past for an upload to be started. Otherwise the propagator will
|
|
|
|
* assume it's still being changed and skip it.
|
|
|
|
*
|
|
|
|
* This value must be smaller than the msBetweenRequestAndSync in
|
|
|
|
* the folder manager.
|
|
|
|
*
|
|
|
|
* Two seconds has shown to be a good value in tests.
|
|
|
|
*/
|
|
|
|
static const int minFileAgeForUpload = 2000; // ms; never mutated, so declare it const
|
|
|
|
|
2014-08-26 14:30:00 +04:00
|
|
|
/**
 * Returns the size (in bytes) of the chunks used for chunked uploads.
 *
 * Overridable via the OWNCLOUD_CHUNK_SIZE environment variable; falls back
 * to 5 MiB when the variable is unset, empty, zero or not a number.
 */
static qint64 chunkSize() {
    // Direct initialization of the local static runs exactly once (C++11
    // "magic statics"), unlike the previous check-then-assign pattern which
    // raced if called concurrently. The cached value is also stored as
    // qint64 to match the return type instead of a narrower uint.
    static const qint64 cachedChunkSize = []() -> qint64 {
        qint64 size = qgetenv("OWNCLOUD_CHUNK_SIZE").toUInt();
        if (size == 0) {
            size = 5*1024*1024; // default to 5 MiB
        }
        return size;
    }();
    return cachedChunkSize;
}
|
|
|
|
|
2014-02-06 14:50:16 +04:00
|
|
|
// Issues the PUT request for this job's path, sending _device as the body.
// The custom headers collected in _headers (chunking info, If-Match, etc.)
// are copied onto the request verbatim.
void PUTFileJob::start() {
    QNetworkRequest req;
    for(QMap<QByteArray, QByteArray>::const_iterator it = _headers.begin(); it != _headers.end(); ++it) {
        req.setRawHeader(it.key(), it.value());
    }

    setReply(davRequest("PUT", path(), req, _device.data()));
    setupConnections(reply());

    // A reply can carry an immediate error (e.g. the request could not even
    // be dispatched); log it, but still start the job so the normal
    // finished handling runs.
    if( reply()->error() != QNetworkReply::NoError ) {
        qWarning() << Q_FUNC_INFO << " Network error: " << reply()->errorString();
    }

    // Forward upload progress to our own signal and report activity to the
    // account (used for global network-activity indication).
    connect(reply(), SIGNAL(uploadProgress(qint64,qint64)), this, SIGNAL(uploadProgress(qint64,qint64)));
    connect(this, SIGNAL(networkActivity()), account().data(), SIGNAL(propagatorNetworkActivity()));

    AbstractNetworkJob::start();
}
|
|
|
|
|
2014-04-30 19:54:14 +04:00
|
|
|
// Invoked when the job's timeout elapses: record a translatable error
// message, then abort the reply. Aborting triggers the normal error path
// in the finished handler, which will pick up _errorString.
void PUTFileJob::slotTimeout() {
    _errorString = tr("Connection Timeout");
    reply()->abort();
}
|
2014-02-10 16:00:22 +04:00
|
|
|
|
2014-07-25 15:30:48 +04:00
|
|
|
// Starts (or restarts) polling the server-provided poll URL for the status
// of an asynchronous upload. The stored path may be server-relative, so it
// is re-anchored onto the account's scheme/authority here.
void PollJob::start()
{
    // Polling can legitimately be slow; allow up to two minutes per round.
    setTimeout(120 * 1000);
    QUrl accountUrl = account()->url();
    QUrl finalUrl = QUrl::fromUserInput(accountUrl.scheme() + QLatin1String("://") + accountUrl.authority()
        + (path().startsWith('/') ? QLatin1String("") : QLatin1String("/")) + path());
    setReply(getRequest(finalUrl));
    setupConnections(reply());
    // Any incoming data counts as activity and resets the timeout.
    connect(reply(), SIGNAL(downloadProgress(qint64,qint64)), this, SLOT(resetTimeout()));
    AbstractNetworkJob::start();
}
|
|
|
|
|
2014-07-28 14:12:52 +04:00
|
|
|
// Handles completion of one polling round.
// Returns true when the job is done (success or permanent failure) and has
// emitted finishedSignal; returns false when another polling round was
// started (transient error or upload still in progress server-side).
bool PollJob::finished()
{
    QNetworkReply::NetworkError err = reply()->error();
    if (err != QNetworkReply::NoError) {
        _item._httpErrorCode = reply()->attribute(QNetworkRequest::HttpStatusCodeAttribute).toInt();
        _item._status = classifyError(err, _item._httpErrorCode);
        _item._errorString = reply()->errorString();
        if (_item._status == SyncFileItem::FatalError || _item._httpErrorCode >= 400) {
            // Give up polling. For non-fatal, non-503 errors the poll entry
            // is dropped from the journal so we don't resume it next sync;
            // fatal/503 cases keep it for a later retry.
            if (_item._status != SyncFileItem::FatalError
                    && _item._httpErrorCode != 503) {
                SyncJournalDb::PollInfo info;
                info._file = _item._file;
                // no info._url removes it from the database
                _journal->setPollInfo(info);
                _journal->commit("remove poll info");
            }
            emit finishedSignal();
            return true;
        }
        // Transient network error: poll again.
        start();
        return false;
    }

    bool ok = false;
    QByteArray jsonData = reply()->readAll().trimmed();
    qDebug() << Q_FUNC_INFO << ">" << jsonData << "<" << reply()->attribute(QNetworkRequest::HttpStatusCodeAttribute).toInt();
    QVariantMap status = QtJson::parse(QString::fromUtf8(jsonData), ok).toMap();
    if (!ok || status.isEmpty()) {
        _item._errorString = tr("Invalid JSON reply from the poll URL");
        _item._status = SyncFileItem::NormalError;
        emit finishedSignal();
        return true;
    }

    // The server reports "unfinished" while it is still assembling the
    // upload; keep polling.
    if (status["unfinished"].isValid()) {
        start();
        return false;
    }

    // Terminal reply: extract outcome and file metadata.
    _item._errorString = status["error"].toString();
    _item._status = _item._errorString.isEmpty() ? SyncFileItem::Success : SyncFileItem::NormalError;
    _item._fileId = status["fileid"].toByteArray();
    _item._etag = status["etag"].toByteArray();
    _item._responseTimeStamp = responseTimestamp();

    // Polling is complete either way; remove the persisted poll entry.
    SyncJournalDb::PollInfo info;
    info._file = _item._file;
    // no info._url removes it from the database
    _journal->setPollInfo(info);
    _journal->commit("remove poll info");

    emit finishedSignal();
    return true;
}
|
|
|
|
|
2014-07-25 15:30:48 +04:00
|
|
|
|
2014-02-13 17:02:05 +04:00
|
|
|
// Entry point of the upload propagation job: validates the local file,
// computes the chunking plan (possibly resuming a previous partial upload
// recorded in the journal), and kicks off the first chunk.
void PropagateUploadFileQNAM::start()
{
    // fetchAndAddRelaxed(0) is an atomic read of the abort flag.
    if (_propagator->_abortRequested.fetchAndAddRelaxed(0))
        return;

    QFileInfo fi(_propagator->getFilePath(_item._file));
    if (!FileSystem::fileExists(fi)) {
        done(SyncFileItem::SoftError, tr("File Removed"));
        return;
    }

    // Update the mtime and size, it might have changed since discovery.
    _item._modtime = FileSystem::getModTime(fi.absoluteFilePath());
    quint64 fileSize = FileSystem::getSize(fi);
    _item._size = fileSize;

    // But skip the file if the mtime is too close to 'now'!
    // That usually indicates a file that is still being changed
    // or not yet fully copied to the destination.
    QDateTime modtime = Utility::qDateTimeFromTime_t(_item._modtime);
    if (modtime.msecsTo(QDateTime::currentDateTime()) < minFileAgeForUpload) {
        _propagator->_anotherSyncNeeded = true;
        done(SyncFileItem::SoftError, tr("Local file changed during sync."));
        return;
    }

    _chunkCount = std::ceil(fileSize/double(chunkSize()));
    _startChunk = 0;
    // Pseudo-unique transfer id; mixed from random bits, mtime and size.
    _transferId = qrand() ^ _item._modtime ^ (_item._size << 16);

    const SyncJournalDb::UploadInfo progressInfo = _propagator->_journal->getUploadInfo(_item._file);

    // Resume a previous chunked upload only if the file has not been
    // modified since that upload was interrupted.
    if (progressInfo._valid && Utility::qDateTimeToTime_t(progressInfo._modtime) == _item._modtime ) {
        _startChunk = progressInfo._chunk;
        _transferId = progressInfo._transferid;
        qDebug() << Q_FUNC_INFO << _item._file << ": Resuming from chunk " << _startChunk;
    }

    _currentChunk = 0;
    _duration.start();

    emit progress(_item, 0);
    this->startNextChunk();
}
|
2014-02-10 16:00:22 +04:00
|
|
|
|
2015-01-14 17:14:17 +03:00
|
|
|
// Constructs an upload device tied to the given bandwidth manager, which
// is used for throttling (quota/choke) of reads from this device.
// Note: the manager pointer is dereferenced unconditionally here, so a
// valid manager is required at construction time.
UploadDevice::UploadDevice(BandwidthManager *bwm)
    : _read(0),
      _bandwidthManager(bwm),
      _bandwidthQuota(0),
      _readWithProgress(0),
      _bandwidthLimited(false), _choked(false)
{
    _bandwidthManager->registerUploadDevice(this);
}
|
2014-02-10 16:00:22 +04:00
|
|
|
|
|
|
|
|
2014-09-29 12:30:39 +04:00
|
|
|
// Unregisters from the bandwidth manager (readData() may already have done
// so when the device reached its end, hence the null/paranoia guard).
UploadDevice::~UploadDevice() {
    if (_bandwidthManager) {
        _bandwidthManager->unregisterUploadDevice(this);
    }
}
|
2014-02-13 17:02:05 +04:00
|
|
|
|
2015-01-14 17:14:17 +03:00
|
|
|
// Loads one chunk ([start, start+size) of fileName) into the in-memory
// buffer _data and opens this device read-only over it.
// Returns false (with errorString() set) if the file cannot be opened
// shared, seeked, or fully read — typically because the user is modifying
// the file while the sync runs.
bool UploadDevice::prepareAndOpen(const QString& fileName, qint64 start, qint64 size)
{
    _data.clear();
    _read = 0;

    QFile file(fileName);
    QString openError;
    // Shared-read open so other processes can keep the file open too.
    if (!FileSystem::openFileSharedRead(&file, &openError)) {
        setErrorString(openError);
        return false;
    }

    // The file may have shrunk since the chunk plan was computed; never
    // read past the current end of file.
    size = qMin(FileSystem::getSize(fileName), size);
    _data.resize(size);
    if (!file.seek(start)) {
        setErrorString(file.errorString());
        return false;
    }
    auto read = file.read(_data.data(), size);
    if (read != size) {
        // Short read — treat as an error so the caller can retry the sync.
        setErrorString(file.errorString());
        return false;
    }

    return QIODevice::open(QIODevice::ReadOnly);
}
|
|
|
|
|
|
|
|
|
2014-09-29 12:30:39 +04:00
|
|
|
// QIODevice override. This device is strictly read-only; writing is a
// programming error (assert in debug builds, no-op returning 0 otherwise).
qint64 UploadDevice::writeData(const char* , qint64 ) {
    Q_ASSERT(!"write to read only device");
    return 0;
}
|
2014-09-15 19:55:55 +04:00
|
|
|
|
2014-09-29 12:30:39 +04:00
|
|
|
// QIODevice override: serves QNAM reads from the in-memory chunk buffer,
// applying bandwidth throttling.
// Returns -1 at end of data (QIODevice convention), 0 when temporarily
// unable to deliver (choked or out of quota), else the byte count copied.
qint64 UploadDevice::readData(char* data, qint64 maxlen) {
    //qDebug() << Q_FUNC_INFO << maxlen << _read << _size << _bandwidthQuota;
    if (_data.size() - _read <= 0) {
        // at end; the device no longer needs bandwidth scheduling
        if (_bandwidthManager) {
            _bandwidthManager->unregisterUploadDevice(this);
        }
        return -1;
    }
    maxlen = qMin(maxlen, _data.size() - _read);
    if (maxlen == 0) {
        return 0;
    }
    if (isChoked()) {
        // Bandwidth manager has paused this device entirely.
        return 0;
    }
    if (isBandwidthLimited()) {
        // Deliver at most the remaining quota; the manager tops it up via
        // giveBandwidthQuota() and wakes us with readyRead.
        maxlen = qMin(maxlen, _bandwidthQuota);
        if (maxlen <= 0) { // no quota
            qDebug() << "no quota";
            return 0;
        }
        _bandwidthQuota -= maxlen;
    }
    std::memcpy(data, _data.data()+_read, maxlen);
    _read += maxlen;
    return maxlen;
}
|
|
|
|
|
|
|
|
// Tracks how many of the read bytes have actually been transmitted on the
// wire, as reported by the network job's uploadProgress(sent, total).
// A (0, 0) notification carries no information and is ignored.
void UploadDevice::slotJobUploadProgress(qint64 sent, qint64 t)
{
    if (sent != 0 && t != 0) {
        _readWithProgress = sent;
    }
}
|
2014-04-04 17:41:35 +04:00
|
|
|
|
2014-09-29 12:30:39 +04:00
|
|
|
// QIODevice override: true once the whole in-memory buffer has been read.
bool UploadDevice::atEnd() const {
    return _read >= _data.size();
}
|
2014-04-05 16:25:41 +04:00
|
|
|
|
2014-09-29 12:30:39 +04:00
|
|
|
// QIODevice override: total size of the buffered chunk (used by QNAM to
// set the Content-Length of the PUT request).
qint64 UploadDevice::size() const{
    return _data.size();
}
|
|
|
|
|
|
|
|
// QIODevice override: bytes not yet consumed from the buffer, plus
// whatever sits in QIODevice's own internal buffer (as required by the
// QIODevice subclassing contract).
qint64 UploadDevice::bytesAvailable() const
{
    return _data.size() - _read + QIODevice::bytesAvailable();
}
|
2014-04-05 16:25:41 +04:00
|
|
|
|
2014-09-29 12:30:39 +04:00
|
|
|
// random access, we can seek
|
|
|
|
bool UploadDevice::isSequential() const{
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// QIODevice override: repositions the read cursor, e.g. when QNAM retries
// a request after an authentication round-trip or redirect.
bool UploadDevice::seek ( qint64 pos ) {
    // Let the base class update its internal position first.
    if (! QIODevice::seek(pos)) {
        return false;
    }
    // Reject positions outside the buffered chunk.
    if (pos < 0 || pos > _data.size()) {
        return false;
    }
    _read = pos;
    return true;
}
|
2014-04-04 17:41:35 +04:00
|
|
|
|
2014-09-29 12:30:39 +04:00
|
|
|
// Called by the bandwidth manager to grant this device bwq more bytes of
// upload quota. No-op once the buffer is exhausted.
void UploadDevice::giveBandwidthQuota(qint64 bwq) {
    if (!atEnd()) {
        _bandwidthQuota = bwq;
        QMetaObject::invokeMethod(this, "readyRead", Qt::QueuedConnection); // tell QNAM that we have quota
    }
}
|
|
|
|
|
|
|
|
// Enables or disables quota-based throttling for this device. QNAM is
// woken in either case, since the change may make bytes readable again.
void UploadDevice::setBandwidthLimited(bool b)
{
    _bandwidthLimited = b;
    QMetaObject::invokeMethod(this, "readyRead", Qt::QueuedConnection);
}
|
|
|
|
|
|
|
|
void UploadDevice::setChoked(bool b) {
|
|
|
|
_choked = b;
|
|
|
|
if (!_choked) {
|
|
|
|
QMetaObject::invokeMethod(this, "readyRead", Qt::QueuedConnection);
|
|
|
|
}
|
|
|
|
}
|
2014-02-06 14:50:16 +04:00
|
|
|
|
2014-02-13 17:02:05 +04:00
|
|
|
// Prepares and starts the PUT job for the next chunk of the file, then —
// depending on server version, environment override and remaining chunks —
// may recurse to start further chunks in parallel.
void PropagateUploadFileQNAM::startNextChunk()
{
    // Atomic read of the abort flag.
    if (_propagator->_abortRequested.fetchAndAddRelaxed(0))
        return;

    if (! _jobs.isEmpty() && _currentChunk + _startChunk >= _chunkCount - 1) {
        // Don't do parallel upload of chunk if this might be the last chunk because the server cannot handle that
        // https://github.com/owncloud/core/issues/11106
        // We return now and when the _jobs will be finished we will proceed the last chunk
        return;
    }
    quint64 fileSize = _item._size;
    QMap<QByteArray, QByteArray> headers;
    headers["OC-Total-Length"] = QByteArray::number(fileSize);
    // Ask the server for asynchronous assembly (it may answer 202 + poll URL).
    headers["OC-Async"] = "1";
    headers["OC-Chunk-Size"]= QByteArray::number(quint64(chunkSize()));
    headers["Content-Type"] = "application/octet-stream";
    headers["X-OC-Mtime"] = QByteArray::number(qint64(_item._modtime));
    if (!_item._etag.isEmpty() && _item._etag != "empty_etag" &&
            _item._instruction != CSYNC_INSTRUCTION_NEW // On new files never send a If-Match
            ) {
        // We add quotes because the owncloud server always add quotes around the etag, and
        // csync_owncloud.c's owncloud_file_id always strip the quotes.
        headers["If-Match"] = '"' + _item._etag + '"';
    }

    QString path = _item._file;

    UploadDevice *device = new UploadDevice(&_propagator->_bandwidthManager);
    qint64 chunkStart = 0;
    qint64 currentChunkSize = fileSize;
    if (_chunkCount > 1) {
        int sendingChunk = (_currentChunk + _startChunk) % _chunkCount;
        // XOR with chunk size to make sure everything goes well if chunk size change between runs
        uint transid = _transferId ^ chunkSize();
        // Chunked-upload naming convention understood by the server:
        // <file>-chunking-<transferid>-<chunkcount>-<index>
        path += QString("-chunking-%1-%2-%3").arg(transid).arg(_chunkCount).arg(sendingChunk);

        headers["OC-Chunked"] = "1";

        chunkStart = chunkSize() * quint64(sendingChunk);
        currentChunkSize = chunkSize();
        if (sendingChunk == _chunkCount - 1) { // last chunk
            currentChunkSize = (fileSize % chunkSize());
            if( currentChunkSize == 0 ) { // if the last chunk pretents to be 0, its actually the full chunk size.
                currentChunkSize = chunkSize();
            }
        }
    }

    if (! device->prepareAndOpen(_propagator->getFilePath(_item._file), chunkStart, currentChunkSize)) {
        qDebug() << "ERR: Could not prepare upload device: " << device->errorString();
        // Soft error because this is likely caused by the user modifying his files while syncing
        abortWithError( SyncFileItem::SoftError, device->errorString() );
        delete device;
        return;
    }

    // The job takes ownership of the device (passed as _device in PUTFileJob).
    PUTFileJob* job = new PUTFileJob(_propagator->account(), _propagator->_remoteFolder + path, device, headers, _currentChunk);
    _jobs.append(job);
    connect(job, SIGNAL(finishedSignal()), this, SLOT(slotPutFinished()));
    connect(job, SIGNAL(uploadProgress(qint64,qint64)), this, SLOT(slotUploadProgress(qint64,qint64)));
    connect(job, SIGNAL(uploadProgress(qint64,qint64)), device, SLOT(slotJobUploadProgress(qint64,qint64)));
    connect(job, SIGNAL(destroyed(QObject*)), this, SLOT(slotJobDestroyed(QObject*)));
    job->start();
    _propagator->_activeJobs++;
    _currentChunk++;

    // Decide whether more chunks may be uploaded concurrently.
    bool parallelChunkUpload = true;
    QByteArray env = qgetenv("OWNCLOUD_PARALLEL_CHUNK");
    if (!env.isEmpty()) {
        // Explicit user override wins.
        parallelChunkUpload = env != "false" && env != "0";
    } else {
        auto version = _propagator->account()->serverVersion();
        auto dotPos = version.indexOf('.');
        if (dotPos > 0) {
            if (version.leftRef(dotPos)
#if QT_VERSION < QT_VERSION_CHECK(5, 1, 0)
                    .toString() // QStringRef::toInt was added in Qt 5.1
#endif
                    .toInt() < 8) {

                // Disable parallel chunk upload on older sever to avoid too many
                // internal sever errors (#2743)
                parallelChunkUpload = false;
            }
        }
    }

    if (_currentChunk + _startChunk >= _chunkCount - 1) {
        // Don't do parallel upload of chunk if this might be the last chunk because the server cannot handle that
        // https://github.com/owncloud/core/issues/11106
        parallelChunkUpload = false;
    }

    if (parallelChunkUpload && (_propagator->_activeJobs < _propagator->maximumActiveJob())
            && _currentChunk < _chunkCount ) {
        startNextChunk();
    }
    if (!parallelChunkUpload || _chunkCount - _currentChunk <= 0) {
        // Signal the propagator that it may schedule other items.
        emit ready();
    }
}
|
|
|
|
|
|
|
|
// Handles completion of one chunk's PUT job: classifies errors, detects
// local modification during upload, persists resume information, starts
// the next chunk, switches to async polling on HTTP 202, and finalizes
// the item when the server acknowledged the last chunk with an ETag.
void PropagateUploadFileQNAM::slotPutFinished()
{
    PUTFileJob *job = qobject_cast<PUTFileJob *>(sender());
    Q_ASSERT(job);
    slotJobDestroyed(job); // remove it from the _jobs list

    qDebug() << Q_FUNC_INFO << job->reply()->request().url() << "FINISHED WITH STATUS"
             << job->reply()->error()
             << (job->reply()->error() == QNetworkReply::NoError ? QLatin1String("") : job->reply()->errorString())
             << job->reply()->attribute(QNetworkRequest::HttpStatusCodeAttribute)
             << job->reply()->attribute(QNetworkRequest::HttpReasonPhraseAttribute);

    _propagator->_activeJobs--;

    if (_finished) {
        // We have send the finished signal already. We don't need to handle any remaining jobs
        return;
    }

    QNetworkReply::NetworkError err = job->reply()->error();
    if (err != QNetworkReply::NoError) {
        _item._httpErrorCode = job->reply()->attribute(QNetworkRequest::HttpStatusCodeAttribute).toInt();
        // Read-only-share conflict is handled (and the item restored)
        // inside this helper; nothing more to do in that case.
        if(checkForProblemsWithShared(_item._httpErrorCode,
                tr("The file was edited locally but is part of a read only share. "
                   "It is restored and your edit is in the conflict file."))) {
            return;
        }
        QString errorString = job->errorString();

        QByteArray replyContent = job->reply()->readAll();
        qDebug() << replyContent; // display the XML error in the debug
        QRegExp rx("<s:message>(.*)</s:message>"); // Issue #1366: display server exception
        if (rx.indexIn(QString::fromUtf8(replyContent)) != -1) {
            errorString += QLatin1String(" (") + rx.cap(1) + QLatin1Char(')');
        }

        if (_item._httpErrorCode == 412) {
            // Precondition Failed: Maybe the bad etag is in the database, we need to clear the
            // parent folder etag so we won't read from DB next sync.
            _propagator->_journal->avoidReadFromDbOnNextSync(_item._file);
            _propagator->_anotherSyncNeeded = true;
        }

        abortWithError(classifyError(err, _item._httpErrorCode), errorString);
        return;
    }

    _item._httpErrorCode = job->reply()->attribute(QNetworkRequest::HttpStatusCodeAttribute).toInt();
    // The server needs some time to process the request and provide with a poll URL
    if (_item._httpErrorCode == 202) {
        _finished = true;
        QString path = QString::fromUtf8(job->reply()->rawHeader("OC-Finish-Poll"));
        if (path.isEmpty()) {
            done(SyncFileItem::NormalError, tr("Poll URL missing"));
            return;
        }
        startPollJob(path);
        return;
    }

    // Check the file again post upload.
    // Two cases must be considered separately: If the upload is finished,
    // the file is on the server and has a changed ETag. In that case,
    // the etag has to be properly updated in the client journal, and because
    // of that we can bail out here with an error. But we can reschedule a
    // sync ASAP.
    // But if the upload is ongoing, because not all chunks were uploaded
    // yet, the upload can be stopped and an error can be displayed, because
    // the server hasn't registered the new file yet.
    bool finished = job->reply()->hasRawHeader("ETag")
            || job->reply()->hasRawHeader("OC-ETag");

    QFileInfo fi(_propagator->getFilePath(_item._file));

    // Check if the file still exists
    if( !FileSystem::fileExists(fi) ) {
        if (!finished) {
            abortWithError(SyncFileItem::SoftError, tr("The local file was removed during sync."));
            return;
        } else {
            _propagator->_anotherSyncNeeded = true;
        }
    }

    // compare expected and real modification time of the file and size
    const time_t new_mtime = FileSystem::getModTime(fi.absoluteFilePath());
    const quint64 new_size = static_cast<quint64>(FileSystem::getSize(fi));
    if (new_mtime != _item._modtime || new_size != _item._size) {
        qDebug() << "The local file has changed during upload:"
                 << "mtime: " << _item._modtime << "<->" << new_mtime
                 << ", size: " << _item._size << "<->" << new_size
                 << ", QFileInfo: " << Utility::qDateTimeToTime_t(fi.lastModified()) << fi.lastModified();
        _propagator->_anotherSyncNeeded = true;
        if( !finished ) {
            abortWithError(SyncFileItem::SoftError, tr("Local file changed during sync."));
            // FIXME: the legacy code was retrying for a few seconds.
            // and also checking that after the last chunk, and removed the file in case of INSTRUCTION_NEW
            return;
        }
    }

    if (!finished) {
        // Proceed to next chunk.
        if (_currentChunk >= _chunkCount) {
            if (!_jobs.empty()) {
                // just wait for the other job to finish.
                return;
            }
            _finished = true;
            done(SyncFileItem::NormalError, tr("The server did not acknowledge the last chunk. (No e-tag were present)"));
            return;
        }

        // Persist resume information so an interrupted upload can continue
        // from the lowest chunk that is guaranteed uploaded.
        SyncJournalDb::UploadInfo pi;
        pi._valid = true;
        auto currentChunk = job->_chunk;
        foreach (auto *job, _jobs) {
            // Take the minimum finished one
            currentChunk = qMin(currentChunk, job->_chunk - 1);
        }
        pi._chunk = (currentChunk + _startChunk + 1) % _chunkCount ; // next chunk to start with
        pi._transferid = _transferId;
        pi._modtime = Utility::qDateTimeFromTime_t(_item._modtime);
        _propagator->_journal->setUploadInfo(_item._file, pi);
        _propagator->_journal->commit("Upload info");
        startNextChunk();
        return;
    }

    // the following code only happens after all chunks were uploaded.
    _finished = true;
    // the file id should only be empty for new files up- or downloaded
    QByteArray fid = job->reply()->rawHeader("OC-FileID");
    if( !fid.isEmpty() ) {
        if( !_item._fileId.isEmpty() && _item._fileId != fid ) {
            qDebug() << "WARN: File ID changed!" << _item._fileId << fid;
        }
        _item._fileId = fid;
    }

    QByteArray etag = getEtagFromReply(job->reply());
    _item._etag = etag;

    _item._responseTimeStamp = job->responseTimestamp();

    if (job->reply()->rawHeader("X-OC-MTime") != "accepted") {
        // X-OC-MTime is supported since owncloud 5.0. But not when chunking.
        // Normaly Owncloud 6 always put X-OC-MTime
        qWarning() << "Server do not support X-OC-MTime" << job->reply()->rawHeader("X-OC-MTime");
#ifdef USE_NEON
        // Fall back to a separate PROPPATCH-style job to fix the mtime,
        // then finalize from its completion callback.
        PropagatorJob *newJob = new UpdateMTimeAndETagJob(_propagator, _item);
        QObject::connect(newJob, SIGNAL(completed(SyncFileItem)), this, SLOT(finalize(SyncFileItem)));
        QMetaObject::invokeMethod(newJob, "start");
        return;
#else
        // Well, the mtime was not set
#endif
    }
    finalize(_item);
}
|
|
|
|
|
2014-04-30 12:10:32 +04:00
|
|
|
// Records the successful upload in the sync journal and completes the item.
// `copy` is usually _item itself, but may come from UpdateMTimeAndETagJob
// or PollJob with refreshed etag/fileId.
void PropagateUploadFileQNAM::finalize(const SyncFileItem &copy)
{
    // Normally, copy == _item, but when it comes from the UpdateMTimeAndETagJob, we need to do
    // some updates
    _item._etag = copy._etag;
    _item._fileId = copy._fileId;

    _item._requestDuration = _duration.elapsed();

    _propagator->_journal->setFileRecord(SyncJournalFileRecord(_item, _propagator->getFilePath(_item._file)));
    // Remove from the progress database:
    _propagator->_journal->setUploadInfo(_item._file, SyncJournalDb::UploadInfo());
    _propagator->_journal->commit("upload file start");

    _finished = true;
    done(SyncFileItem::Success);
}
|
|
|
|
|
2014-11-08 13:11:25 +03:00
|
|
|
// Aggregates progress across all in-flight chunk jobs and emits a single
// byte-count progress signal for the whole file.
void PropagateUploadFileQNAM::slotUploadProgress(qint64 sent, qint64)
{
    // Estimate completed chunks; clamp when resuming wrapped past the end.
    int progressChunk = _currentChunk + _startChunk - 1;
    if (progressChunk >= _chunkCount)
        progressChunk = _currentChunk - 1;
    quint64 amount = progressChunk * chunkSize();
    // Remember per-job progress as a dynamic property on the sending job.
    sender()->setProperty("byteWritten", sent);
    if (_jobs.count() > 1) {
        // With parallel jobs, subtract the chunks counted above that are
        // actually still in flight, then add each job's own byte count.
        amount -= (_jobs.count() -1) * chunkSize();
        foreach (QObject *j, _jobs) {
            amount += j->property("byteWritten").toULongLong();
        }
    } else {
        amount += sent;
    }
    emit progress(_item, amount);
}
|
|
|
|
|
2014-07-25 15:30:48 +04:00
|
|
|
// Starts polling the given URL for the result of an asynchronous (202)
// upload, and persists the poll info so an interrupted client can resume
// polling on the next sync.
void PropagateUploadFileQNAM::startPollJob(const QString& path)
{
    PollJob* job = new PollJob(_propagator->account(), path, _item,
                               _propagator->_journal, _propagator->_localDir, this);
    connect(job, SIGNAL(finishedSignal()), SLOT(slotPollFinished()));
    SyncJournalDb::PollInfo info;
    info._file = _item._file;
    info._url = path;
    info._modtime = _item._modtime;
    _propagator->_journal->setPollInfo(info);
    _propagator->_journal->commit("add poll info");
    _propagator->_activeJobs++;
    job->start();
}
|
|
|
|
|
2014-07-28 14:12:52 +04:00
|
|
|
// Completion handler for the async-upload poll job: propagate its outcome,
// finalizing the item on success.
void PropagateUploadFileQNAM::slotPollFinished()
{
    PollJob *job = qobject_cast<PollJob *>(sender());
    Q_ASSERT(job);

    _propagator->_activeJobs--;

    if (job->_item._status != SyncFileItem::Success) {
        _finished = true;
        done(job->_item._status, job->_item._errorString);
        return;
    }

    // The poll reply carries the final etag/fileId for the journal.
    finalize(job->_item);
}
|
2014-03-14 16:03:16 +04:00
|
|
|
|
2014-09-15 19:55:55 +04:00
|
|
|
// Removes a (possibly already destroyed) job from the in-flight list;
// erase-remove keeps the remaining jobs in order.
void PropagateUploadFileQNAM::slotJobDestroyed(QObject* job)
{
    _jobs.erase(std::remove(_jobs.begin(), _jobs.end(), job) , _jobs.end());
}
|
2014-03-14 16:03:16 +04:00
|
|
|
|
2014-02-06 17:52:56 +04:00
|
|
|
// Aborts every in-flight chunk job by aborting its network reply; the
// jobs' finished handlers then run with an OperationCanceled-style error.
void PropagateUploadFileQNAM::abort()
{
    foreach(auto *job, _jobs) {
        if (job->reply()) {
            qDebug() << Q_FUNC_INFO << job << this->_item._file;
            job->reply()->abort();
        }
    }
}
|
|
|
|
|
2015-01-14 14:48:38 +03:00
|
|
|
// This function is used whenever there is an error occurring and jobs might be in progress.
// Marks the item finished first so the aborted jobs' completion handlers
// bail out early, then cancels them and reports the error.
void PropagateUploadFileQNAM::abortWithError(SyncFileItem::Status status, const QString &error)
{
    _finished = true;
    abort();
    done(status, error);
}
|
|
|
|
|
|
|
|
|
2014-12-10 15:01:36 +03:00
|
|
|
}
|