2013-05-03 21:11:00 +04:00
|
|
|
/*
|
|
|
|
* Copyright (C) by Olivier Goffart <ogoffart@owncloud.com>
|
|
|
|
* Copyright (C) by Klaas Freitag <freitag@owncloud.com>
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License as published by
|
|
|
|
* the Free Software Foundation; either version 2 of the License, or
|
|
|
|
* (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful, but
|
|
|
|
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
|
|
|
|
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
|
|
* for more details.
|
|
|
|
*/
|
|
|
|
|
2014-02-18 14:52:38 +04:00
|
|
|
#include "owncloudpropagator.h"
|
2013-10-16 13:59:54 +04:00
|
|
|
#include "syncjournaldb.h"
|
2013-10-28 13:47:10 +04:00
|
|
|
#include "syncjournalfilerecord.h"
|
2014-11-11 14:16:14 +03:00
|
|
|
#include "propagatedownload.h"
|
|
|
|
#include "propagateupload.h"
|
2014-11-11 15:19:29 +03:00
|
|
|
#include "propagateremotedelete.h"
|
2014-11-11 18:09:01 +03:00
|
|
|
#include "propagateremotemove.h"
|
2014-11-13 20:57:07 +03:00
|
|
|
#include "propagateremotemkdir.h"
|
2014-02-18 14:52:38 +04:00
|
|
|
#include "propagatorjobs.h"
|
2014-11-10 01:25:57 +03:00
|
|
|
#include "configfile.h"
|
2014-07-11 02:31:24 +04:00
|
|
|
#include "utility.h"
|
2015-03-27 13:11:44 +03:00
|
|
|
#include "account.h"
|
2014-07-28 14:12:52 +04:00
|
|
|
#include <json.h>
|
2013-05-03 21:11:00 +04:00
|
|
|
|
2014-05-23 20:54:35 +04:00
|
|
|
#ifdef Q_OS_WIN
|
|
|
|
#include <windef.h>
|
|
|
|
#include <winbase.h>
|
|
|
|
#endif
|
|
|
|
|
2014-02-18 14:52:38 +04:00
|
|
|
#include <QStack>
|
2014-05-26 19:36:52 +04:00
|
|
|
#include <QFileInfo>
|
2014-07-07 15:00:38 +04:00
|
|
|
#include <QDir>
|
2014-09-29 12:30:39 +04:00
|
|
|
#include <QTimer>
|
|
|
|
#include <QObject>
|
|
|
|
#include <QTimerEvent>
|
2015-03-30 09:41:37 +03:00
|
|
|
#include <QDebug>
|
2013-05-03 21:11:00 +04:00
|
|
|
|
2014-11-10 00:34:07 +03:00
|
|
|
namespace OCC {
|
2013-12-10 20:19:25 +04:00
|
|
|
|
2015-10-01 12:39:09 +03:00
|
|
|
// Number of free bytes on disk below which syncing must stop entirely.
// Can be overridden through the OWNCLOUD_CRITICAL_FREE_SPACE_BYTES
// environment variable (read once, cached); the result is always clamped
// into [0, freeSpaceLimit()].
qint64 criticalFreeSpaceLimit()
{
    static bool haveOverride = false;
    static qint64 envOverride =
        qgetenv("OWNCLOUD_CRITICAL_FREE_SPACE_BYTES").toLongLong(&haveOverride);

    const qint64 defaultLimit = 50 * 1000 * 1000LL;
    const qint64 limit = haveOverride ? envOverride : defaultLimit;

    // The critical limit must never exceed the soft limit.
    return qBound(0LL, limit, freeSpaceLimit());
}
|
|
|
|
|
|
|
|
// Number of free bytes on disk below which new downloads are refused
// (soft limit). Can be overridden through the OWNCLOUD_FREE_SPACE_BYTES
// environment variable (read once, cached).
qint64 freeSpaceLimit()
{
    static bool haveOverride = false;
    static qint64 envOverride =
        qgetenv("OWNCLOUD_FREE_SPACE_BYTES").toLongLong(&haveOverride);

    const qint64 defaultLimit = 250 * 1000 * 1000LL;
    return haveOverride ? envOverride : defaultLimit;
}
|
|
|
|
|
2015-03-27 13:11:44 +03:00
|
|
|
// Out-of-line (empty) destructor: keeps destruction of the scoped-pointer
// members (e.g. _rootJob) in this translation unit, where their types are
// complete.
OwncloudPropagator::~OwncloudPropagator()
{}
|
|
|
|
|
2015-10-05 06:20:09 +03:00
|
|
|
/* The maximum number of active jobs in parallel */
|
2014-09-15 19:55:55 +04:00
|
|
|
int OwncloudPropagator::maximumActiveJob()
|
|
|
|
{
|
2015-08-06 16:28:09 +03:00
|
|
|
static int max = qgetenv("OWNCLOUD_MAX_PARALLEL").toUInt();
|
|
|
|
if (!max) {
|
|
|
|
max = 3; //default
|
|
|
|
}
|
|
|
|
|
2015-08-06 19:13:48 +03:00
|
|
|
if (_downloadLimit.fetchAndAddAcquire(0) != 0 || _uploadLimit.fetchAndAddAcquire(0) != 0) {
|
2015-08-06 12:10:27 +03:00
|
|
|
// disable parallelism when there is a network limit.
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
2014-03-28 14:20:07 +04:00
|
|
|
return max;
|
|
|
|
}
|
2014-02-12 14:07:34 +04:00
|
|
|
|
2016-02-25 19:40:24 +03:00
|
|
|
int OwncloudPropagator::hardMaximumActiveJob()
|
|
|
|
{
|
|
|
|
int max = maximumActiveJob();
|
|
|
|
return max*2;
|
|
|
|
// FIXME: Wondering if we should hard-limit to 1 if maximumActiveJob() is 1
|
|
|
|
// to support our old use case of limiting concurrency (when "automatic" bandwidth
|
|
|
|
// limiting is set. But this causes https://github.com/owncloud/client/issues/4081
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2015-10-29 11:35:42 +03:00
|
|
|
/** Updates, creates or removes a blacklist entry for the given item.
|
2014-10-09 16:49:51 +04:00
|
|
|
*
|
|
|
|
* Returns whether the file is in the blacklist now.
|
|
|
|
*/
|
2015-10-29 11:35:42 +03:00
|
|
|
static bool blacklistCheck(SyncJournalDb* journal, const SyncFileItem& item)
|
2014-10-09 16:49:51 +04:00
|
|
|
{
|
2015-01-16 12:17:19 +03:00
|
|
|
SyncJournalErrorBlacklistRecord oldEntry = journal->errorBlacklistEntry(item._file);
|
|
|
|
SyncJournalErrorBlacklistRecord newEntry = SyncJournalErrorBlacklistRecord::update(oldEntry, item);
|
2014-10-09 16:49:51 +04:00
|
|
|
|
|
|
|
if (newEntry.isValid()) {
|
2015-01-16 12:17:19 +03:00
|
|
|
journal->updateErrorBlacklistEntry(newEntry);
|
2014-10-09 16:49:51 +04:00
|
|
|
} else if (oldEntry.isValid()) {
|
2015-01-16 12:17:19 +03:00
|
|
|
journal->wipeErrorBlacklistEntry(item._file);
|
2014-10-09 16:49:51 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
return newEntry.isValid();
|
|
|
|
}
|
|
|
|
|
2013-11-20 16:44:01 +04:00
|
|
|
// Finalizes this job with the given status: adjusts the status for
// restorations and ongoing aborts, maintains the error blacklist, records
// the error string on the item, and emits itemCompleted()/finished().
void PropagateItemJob::done(SyncFileItem::Status status, const QString &errorString)
{
    _state = Finished;
    if (_item->_isRestoration) {
        // A restoration that succeeded (possibly with a conflict) is reported
        // with the dedicated Restoration status; a failed one keeps its
        // status and appends the restoration error text.
        if( status == SyncFileItem::Success || status == SyncFileItem::Conflict) {
            status = SyncFileItem::Restoration;
        } else {
            _item->_errorString += tr("; Restoration Failed: %1").arg(errorString);
        }
    } else {
        // Keep the first recorded error string, if one is already set.
        if( _item->_errorString.isEmpty() ) {
            _item->_errorString = errorString;
        }
    }

    if( _propagator->_abortRequested.fetchAndAddRelaxed(0) &&
        (status == SyncFileItem::NormalError || status == SyncFileItem::FatalError)) {
        // an abort request is ongoing. Change the status to Soft-Error
        status = SyncFileItem::SoftError;
    }

    switch( status ) {
    case SyncFileItem::SoftError:
    case SyncFileItem::FatalError:
        // do not blacklist in case of soft error or fatal error.
        break;
    case SyncFileItem::NormalError:
        if (blacklistCheck(_propagator->_journal, *_item) && _item->_hasBlacklistEntry) {
            // do not error if the item was, and continues to be, blacklisted
            status = SyncFileItem::FileIgnored;
            _item->_errorString.prepend(tr("Continue blacklisting:") + " ");
        }
        break;
    case SyncFileItem::Success:
    case SyncFileItem::Restoration:
        if( _item->_hasBlacklistEntry ) {
            // wipe blacklist entry.
            _propagator->_journal->wipeErrorBlacklistEntry(_item->_file);
            // remove a blacklist entry in case the file was moved.
            if( _item->_originalFile != _item->_file ) {
                _propagator->_journal->wipeErrorBlacklistEntry(_item->_originalFile);
            }
        }
        break;
    case SyncFileItem::Conflict:
    case SyncFileItem::FileIgnored:
    case SyncFileItem::NoStatus:
        // nothing
        break;
    }

    _item->_status = status;

    emit itemCompleted(*_item, *this);
    emit finished(status);
}
|
|
|
|
|
2014-02-12 16:44:55 +04:00
|
|
|
/**
|
|
|
|
* For delete or remove, check that we are not removing from a shared directory.
|
|
|
|
* If we are, try to restore the file
|
|
|
|
*
|
|
|
|
* Return true if the problem is handled.
|
|
|
|
*/
|
2014-02-27 15:02:22 +04:00
|
|
|
bool PropagateItemJob::checkForProblemsWithShared(int httpStatusCode, const QString& msg)
|
2014-02-04 18:01:10 +04:00
|
|
|
{
|
2014-02-19 20:21:01 +04:00
|
|
|
PropagateItemJob *newJob = NULL;
|
|
|
|
|
2015-04-15 16:19:11 +03:00
|
|
|
if( httpStatusCode == 403 && _propagator->isInSharedDirectory(_item->_file )) {
|
|
|
|
if( !_item->_isDirectory ) {
|
|
|
|
SyncFileItemPtr downloadItem(new SyncFileItem(*_item));
|
2016-01-06 12:01:22 +03:00
|
|
|
if (downloadItem->_instruction == CSYNC_INSTRUCTION_NEW
|
|
|
|
|| downloadItem->_instruction == CSYNC_INSTRUCTION_TYPE_CHANGE) {
|
2014-02-21 13:53:09 +04:00
|
|
|
// don't try to recover pushing new files
|
|
|
|
return false;
|
2015-04-15 16:19:11 +03:00
|
|
|
} else if (downloadItem->_instruction == CSYNC_INSTRUCTION_SYNC) {
|
2015-10-05 06:20:09 +03:00
|
|
|
// we modified the file locally, just create a conflict then
|
2015-04-15 16:19:11 +03:00
|
|
|
downloadItem->_instruction = CSYNC_INSTRUCTION_CONFLICT;
|
2014-04-04 12:50:40 +04:00
|
|
|
|
|
|
|
// HACK to avoid continuation: See task #1448: We do not know the _modtime from the
|
|
|
|
// server, at this point, so just set the current one. (rather than the one locally)
|
2015-04-15 16:19:11 +03:00
|
|
|
downloadItem->_modtime = Utility::qDateTimeToTime_t(QDateTime::currentDateTime());
|
2014-02-21 13:53:09 +04:00
|
|
|
} else {
|
|
|
|
// the file was removed or renamed, just recover the old one
|
2015-04-15 16:19:11 +03:00
|
|
|
downloadItem->_instruction = CSYNC_INSTRUCTION_SYNC;
|
2014-02-21 13:53:09 +04:00
|
|
|
}
|
2015-04-15 16:19:11 +03:00
|
|
|
downloadItem->_direction = SyncFileItem::Down;
|
2016-05-18 17:42:55 +03:00
|
|
|
newJob = new PropagateDownloadFile(_propagator, downloadItem);
|
2014-02-12 16:44:55 +04:00
|
|
|
} else {
|
|
|
|
// Directories are harder to recover.
|
|
|
|
// But just re-create the directory, next sync will be able to recover the files
|
2015-04-15 16:19:11 +03:00
|
|
|
SyncFileItemPtr mkdirItem(new SyncFileItem(*_item));
|
2016-08-15 15:17:51 +03:00
|
|
|
mkdirItem->_instruction = CSYNC_INSTRUCTION_NEW;
|
2015-04-15 16:19:11 +03:00
|
|
|
mkdirItem->_direction = SyncFileItem::Down;
|
2014-02-19 20:21:01 +04:00
|
|
|
newJob = new PropagateLocalMkdir(_propagator, mkdirItem);
|
2014-02-12 16:44:55 +04:00
|
|
|
// Also remove the inodes and fileid from the db so no further renames are tried for
|
|
|
|
// this item.
|
2015-04-15 16:19:11 +03:00
|
|
|
_propagator->_journal->avoidRenamesOnNextSync(_item->_file);
|
2014-09-10 19:25:13 +04:00
|
|
|
_propagator->_anotherSyncNeeded = true;
|
2014-02-12 16:44:55 +04:00
|
|
|
}
|
2014-02-19 20:21:01 +04:00
|
|
|
if( newJob ) {
|
|
|
|
newJob->setRestoreJobMsg(msg);
|
|
|
|
_restoreJob.reset(newJob);
|
2015-08-11 14:45:02 +03:00
|
|
|
connect(_restoreJob.data(), SIGNAL(itemCompleted(const SyncFileItemPtr &, const PropagatorJob &)),
|
|
|
|
this, SLOT(slotRestoreJobCompleted(const SyncFileItemPtr &)));
|
2014-04-29 18:56:19 +04:00
|
|
|
QMetaObject::invokeMethod(newJob, "start");
|
2014-02-19 20:21:01 +04:00
|
|
|
}
|
2014-02-04 18:01:10 +04:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
void PropagateItemJob::slotRestoreJobCompleted(const SyncFileItem& item )
|
|
|
|
{
|
2014-02-19 20:21:01 +04:00
|
|
|
QString msg;
|
|
|
|
if(_restoreJob) {
|
|
|
|
msg = _restoreJob->restoreJobMsg();
|
|
|
|
_restoreJob->setRestoreJobMsg();
|
|
|
|
}
|
|
|
|
|
2014-06-23 15:56:17 +04:00
|
|
|
if( item._status == SyncFileItem::Success || item._status == SyncFileItem::Conflict
|
|
|
|
|| item._status == SyncFileItem::Restoration) {
|
2014-02-19 20:21:01 +04:00
|
|
|
done( SyncFileItem::SoftError, msg);
|
2014-02-04 18:01:10 +04:00
|
|
|
} else {
|
2015-09-07 09:51:22 +03:00
|
|
|
done( item._status, tr("A file or folder was removed from a read only share, but restoring failed: %1").arg(item._errorString) );
|
2014-02-04 18:01:10 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-05-23 20:54:35 +04:00
|
|
|
// ================================================================================
|
|
|
|
|
2015-04-15 16:19:11 +03:00
|
|
|
// Factory mapping a sync item's csync instruction and direction to the
// concrete propagation job that executes it. Returns 0 for instructions
// that need no job.
PropagateItemJob* OwncloudPropagator::createJob(const SyncFileItemPtr &item) {
    // TYPE_CHANGE means a file became a directory or vice versa, so the
    // existing entity of the other kind must be deleted before creation.
    bool deleteExisting = item->_instruction == CSYNC_INSTRUCTION_TYPE_CHANGE;
    switch(item->_instruction) {
    case CSYNC_INSTRUCTION_REMOVE:
        if (item->_direction == SyncFileItem::Down) return new PropagateLocalRemove(this, item);
        else return new PropagateRemoteDelete(this, item);
    case CSYNC_INSTRUCTION_NEW:
    case CSYNC_INSTRUCTION_TYPE_CHANGE:
        if (item->_isDirectory) {
            if (item->_direction == SyncFileItem::Down) {
                auto job = new PropagateLocalMkdir(this, item);
                job->setDeleteExistingFile(deleteExisting);
                return job;
            } else {
                auto job = new PropagateRemoteMkdir(this, item);
                job->setDeleteExisting(deleteExisting);
                return job;
            }
        } //fall through
        // (non-directory NEW/TYPE_CHANGE items are transferred like SYNC)
    case CSYNC_INSTRUCTION_SYNC:
    case CSYNC_INSTRUCTION_CONFLICT:
        if (item->_direction != SyncFileItem::Up) {
            auto job = new PropagateDownloadFile(this, item);
            job->setDeleteExistingFolder(deleteExisting);
            return job;
        } else {
            PropagateUploadFileCommon *job = 0;
            // OWNCLOUD_CHUNKING_NG forces ("1") or forbids ("0") the new
            // chunking algorithm regardless of server capabilities.
            static const auto chunkng = qgetenv("OWNCLOUD_CHUNKING_NG");
            if (item->_size > chunkSize()
                && (account()->capabilities().chunkingNg() || chunkng == "1") && chunkng != "0") {
                job = new PropagateUploadFileNG(this, item);
            } else {
                job = new PropagateUploadFileV1(this, item);
            }
            job->setDeleteExisting(deleteExisting);
            return job;
        }
    case CSYNC_INSTRUCTION_RENAME:
        if (item->_direction == SyncFileItem::Up) {
            return new PropagateRemoteMove(this, item);
        } else {
            return new PropagateLocalRename(this, item);
        }
    case CSYNC_INSTRUCTION_IGNORE:
    case CSYNC_INSTRUCTION_ERROR:
        return new PropagateIgnoreJob(this, item);
    default:
        return 0;
    }
    return 0;
}
|
2013-08-14 21:59:16 +04:00
|
|
|
|
2014-06-27 15:34:15 +04:00
|
|
|
// Builds the whole propagation job tree from the sorted item list, wires
// the root job's signals to this propagator, and kicks off scheduling.
void OwncloudPropagator::start(const SyncFileItemVector& items)
{
    Q_ASSERT(std::is_sorted(items.begin(), items.end()));

    /* This builds all the jobs needed for the propagation.
     * Each directory is a PropagateDirectory job, which contains the files in it.
     * In order to do that we loop over the items. (which are sorted by destination)
     * When we enter a directory, we can create the directory job and push it on the stack. */

    _rootJob.reset(new PropagateDirectory(this));
    QStack<QPair<QString /* directory name */, PropagateDirectory* /* job */> > directories;
    directories.push(qMakePair(QString(), _rootJob.data()));
    // Jobs that delete directories are collected here and appended to the
    // root job last, so they run after everything else.
    QVector<PropagatorJob*> directoriesToRemove;
    QString removedDirectory;  // prefix ("path/") of the directory currently being removed
    foreach(const SyncFileItemPtr &item, items) {

        if (!removedDirectory.isEmpty() && item->_file.startsWith(removedDirectory)) {
            // this is an item in a directory which is going to be removed.
            PropagateDirectory *delDirJob = qobject_cast<PropagateDirectory*>(directoriesToRemove.first());

            if (item->_instruction == CSYNC_INSTRUCTION_REMOVE) {
                // already taken care of. (by the removal of the parent directory)

                // increase the number of subjobs that would be there.
                if( delDirJob ) {
                    delDirJob->increaseAffectedCount();
                }
                continue;
            } else if (item->_isDirectory
                       && (item->_instruction == CSYNC_INSTRUCTION_NEW
                           || item->_instruction == CSYNC_INSTRUCTION_TYPE_CHANGE)) {
                // create a new directory within a deleted directory? That can happen if the directory
                // etag was not fetched properly on the previous sync because the sync was aborted
                // while uploading this directory (which is now removed). We can ignore it.
                if( delDirJob ) {
                    delDirJob->increaseAffectedCount();
                }
                continue;
            } else if (item->_instruction == CSYNC_INSTRUCTION_IGNORE) {
                continue;
            } else if (item->_instruction == CSYNC_INSTRUCTION_RENAME) {
                // all is good, the rename will be executed before the directory deletion
            } else {
                qWarning() << "WARNING: Job within a removed directory? This should not happen!"
                           << item->_file << item->_instruction;
            }
        }

        // Pop back up the directory stack until we reach this item's parent.
        while (!item->destination().startsWith(directories.top().first)) {
            directories.pop();
        }

        if (item->_isDirectory) {
            PropagateDirectory *dir = new PropagateDirectory(this, item);
            dir->_firstJob.reset(createJob(item));

            if (item->_instruction == CSYNC_INSTRUCTION_TYPE_CHANGE
                && item->_direction == SyncFileItem::Up) {
                // Skip all potential uploads to the new folder.
                // Processing them now leads to problems with permissions:
                // checkForPermissions() has already run and used the permissions
                // of the file we're about to delete to decide whether uploading
                // to the new dir is ok...
                foreach(const SyncFileItemPtr &item2, items) {
                    if (item2->destination().startsWith(item->destination() + "/")) {
                        item2->_instruction = CSYNC_INSTRUCTION_NONE;
                        _anotherSyncNeeded = true;
                    }
                }
            }

            if (item->_instruction == CSYNC_INSTRUCTION_REMOVE) {
                // We do the removal of directories at the end, because there might be moves from
                // these directories that will happen later.
                directoriesToRemove.prepend(dir);
                removedDirectory = item->_file + "/";

                // We should not update the etag of parent directories of the removed directory
                // since it would be done before the actual remove (issue #1845)
                // NOTE: Currently this means that we don't update those etag at all in this sync,
                // but it should not be a problem, they will be updated in the next sync.
                for (int i = 0; i < directories.size(); ++i) {
                    if (directories[i].second->_item->_instruction == CSYNC_INSTRUCTION_UPDATE_METADATA)
                        directories[i].second->_item->_instruction = CSYNC_INSTRUCTION_NONE;
                }
            } else {
                PropagateDirectory* currentDirJob = directories.top().second;
                currentDirJob->append(dir);
            }
            directories.push(qMakePair(item->destination() + "/" , dir));
        } else if (PropagateItemJob* current = createJob(item)) {
            if (item->_instruction == CSYNC_INSTRUCTION_TYPE_CHANGE) {
                // will delete directories, so defer execution
                directoriesToRemove.prepend(current);
                removedDirectory = item->_file + "/";
            } else {
                directories.top().second->append(current);
            }
        }
    }

    // Deletions of whole directories run last (see above).
    foreach(PropagatorJob* it, directoriesToRemove) {
        _rootJob->append(it);
    }

    connect(_rootJob.data(), SIGNAL(itemCompleted(const SyncFileItem &, const PropagatorJob &)),
            this, SIGNAL(itemCompleted(const SyncFileItem &, const PropagatorJob &)));
    connect(_rootJob.data(), SIGNAL(progress(const SyncFileItem &,quint64)), this, SIGNAL(progress(const SyncFileItem &,quint64)));
    connect(_rootJob.data(), SIGNAL(finished(SyncFileItem::Status)), this, SLOT(emitFinished(SyncFileItem::Status)));
    connect(_rootJob.data(), SIGNAL(ready()), this, SLOT(scheduleNextJob()), Qt::QueuedConnection);

    qDebug() << "Using QNAM/HTTP parallel code path";

    QTimer::singleShot(0, this, SLOT(scheduleNextJob()));
}
|
2013-05-03 21:11:00 +04:00
|
|
|
|
2016-11-15 20:47:04 +03:00
|
|
|
// ownCloud server < 7.0 did not had permissions so we need some other euristics
|
|
|
|
// to detect wrong doing in a Shared directory
|
2014-02-04 18:01:10 +04:00
|
|
|
bool OwncloudPropagator::isInSharedDirectory(const QString& file)
|
|
|
|
{
|
|
|
|
bool re = false;
|
2016-11-15 20:47:04 +03:00
|
|
|
if( _remoteFolder.startsWith( QLatin1String("Shared") ) ) {
|
2014-02-04 18:01:10 +04:00
|
|
|
// The Shared directory is synced as its own sync connection
|
|
|
|
re = true;
|
|
|
|
} else {
|
2014-02-19 18:23:36 +04:00
|
|
|
if( file.startsWith("Shared/") || file == "Shared" ) {
|
2014-02-04 18:01:10 +04:00
|
|
|
// The whole ownCloud is synced and Shared is always a top dir
|
|
|
|
re = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return re;
|
|
|
|
}
|
|
|
|
|
2014-05-23 20:54:35 +04:00
|
|
|
int OwncloudPropagator::httpTimeout()
|
|
|
|
{
|
|
|
|
static int timeout;
|
|
|
|
if (!timeout) {
|
|
|
|
timeout = qgetenv("OWNCLOUD_TIMEOUT").toUInt();
|
|
|
|
if (timeout == 0) {
|
2014-11-10 00:30:29 +03:00
|
|
|
ConfigFile cfg;
|
2014-05-28 18:28:22 +04:00
|
|
|
timeout = cfg.timeout();
|
2014-05-23 20:54:35 +04:00
|
|
|
}
|
2014-05-28 18:28:22 +04:00
|
|
|
|
2014-05-23 20:54:35 +04:00
|
|
|
}
|
|
|
|
return timeout;
|
|
|
|
}
|
|
|
|
|
2016-01-15 15:16:52 +03:00
|
|
|
// Upload chunk size in bytes, resolved once and cached:
// OWNCLOUD_CHUNK_SIZE environment variable first, then the config file.
quint64 OwncloudPropagator::chunkSize()
{
    // BUGFIX: the cache used to be a 'uint' filled via toUInt(), silently
    // truncating values larger than 32 bits even though the function
    // returns quint64. Use quint64/toULongLong consistently.
    static quint64 chunkSize;
    if (!chunkSize) {
        chunkSize = qgetenv("OWNCLOUD_CHUNK_SIZE").toULongLong();
        if (chunkSize == 0) {
            ConfigFile cfg;
            chunkSize = cfg.chunkSize();
        }
    }
    return chunkSize;
}
|
|
|
|
|
|
|
|
|
2014-05-23 20:54:35 +04:00
|
|
|
bool OwncloudPropagator::localFileNameClash( const QString& relFile )
|
|
|
|
{
|
|
|
|
bool re = false;
|
|
|
|
const QString file( _localDir + relFile );
|
2014-05-26 19:36:52 +04:00
|
|
|
|
2014-05-26 19:52:24 +04:00
|
|
|
if( !file.isEmpty() && Utility::fsCasePreserving() ) {
|
|
|
|
#ifdef Q_OS_MAC
|
|
|
|
QFileInfo fileInfo(file);
|
2014-05-30 17:46:51 +04:00
|
|
|
if (!fileInfo.exists()) {
|
2014-05-26 19:52:24 +04:00
|
|
|
re = false;
|
2014-07-23 19:54:12 +04:00
|
|
|
qDebug() << Q_FUNC_INFO << "No valid fileinfo";
|
2014-05-30 17:46:51 +04:00
|
|
|
} else {
|
2014-07-23 19:54:12 +04:00
|
|
|
// Need to normalize to composited form because of
|
|
|
|
// https://bugreports.qt-project.org/browse/QTBUG-39622
|
|
|
|
const QString cName = fileInfo.canonicalFilePath().normalized(QString::NormalizationForm_C);
|
|
|
|
// qDebug() << Q_FUNC_INFO << "comparing " << cName << " with " << file;
|
|
|
|
bool equal = (file == cName);
|
|
|
|
re = (!equal && ! cName.endsWith(relFile, Qt::CaseSensitive) );
|
|
|
|
// qDebug() << Q_FUNC_INFO << "Returning for localFileNameClash: " << re;
|
2014-05-30 17:46:51 +04:00
|
|
|
}
|
2014-05-26 19:52:24 +04:00
|
|
|
#elif defined(Q_OS_WIN)
|
|
|
|
const QString file( _localDir + relFile );
|
|
|
|
qDebug() << "CaseClashCheck for " << file;
|
|
|
|
WIN32_FIND_DATA FindFileData;
|
|
|
|
HANDLE hFind;
|
|
|
|
|
|
|
|
hFind = FindFirstFileW( (wchar_t*)file.utf16(), &FindFileData);
|
|
|
|
if (hFind == INVALID_HANDLE_VALUE) {
|
2014-06-02 21:38:04 +04:00
|
|
|
//qDebug() << "FindFirstFile failed " << GetLastError();
|
2014-05-26 19:52:24 +04:00
|
|
|
// returns false.
|
|
|
|
} else {
|
|
|
|
QString realFileName = QString::fromWCharArray( FindFileData.cFileName );
|
|
|
|
FindClose(hFind);
|
2014-05-23 20:54:35 +04:00
|
|
|
|
2014-05-26 19:52:24 +04:00
|
|
|
if( ! file.endsWith(realFileName, Qt::CaseSensitive) ) {
|
2014-10-06 17:39:49 +04:00
|
|
|
qDebug() << Q_FUNC_INFO << "Detected case clash between" << file << "and" << realFileName;
|
2014-05-26 19:52:24 +04:00
|
|
|
re = true;
|
|
|
|
}
|
2014-05-23 20:54:35 +04:00
|
|
|
}
|
2014-07-07 15:00:38 +04:00
|
|
|
#else
|
2015-10-05 06:20:09 +03:00
|
|
|
// On Linux, the file system is case sensitive, but this code is useful for testing.
|
2014-07-07 15:00:38 +04:00
|
|
|
// Just check that there is no other file with the same name and different casing.
|
|
|
|
QFileInfo fileInfo(file);
|
|
|
|
const QString fn = fileInfo.fileName();
|
|
|
|
QStringList list = fileInfo.dir().entryList(QStringList() << fn);
|
|
|
|
if (list.count() > 1 || (list.count() == 1 && list[0] != fn)) {
|
|
|
|
re = true;
|
|
|
|
}
|
2014-05-23 20:54:35 +04:00
|
|
|
#endif
|
2014-05-26 19:52:24 +04:00
|
|
|
}
|
2014-05-23 20:54:35 +04:00
|
|
|
return re;
|
|
|
|
}
|
|
|
|
|
2014-09-03 14:11:03 +04:00
|
|
|
// Returns the absolute local path for the given sync-root-relative path.
// Simple concatenation — _localDir presumably ends with a path separator;
// confirm at the call sites. TODO confirm
QString OwncloudPropagator::getFilePath(const QString& tmp_file_name) const
{
    return _localDir + tmp_file_name;
}
|
|
|
|
|
2014-11-18 19:35:31 +03:00
|
|
|
void OwncloudPropagator::scheduleNextJob()
|
|
|
|
{
|
2016-02-25 19:40:24 +03:00
|
|
|
// TODO: If we see that the automatic up-scaling has a bad impact we
|
|
|
|
// need to check how to avoid this.
|
|
|
|
// Down-scaling on slow networks? https://github.com/owncloud/client/issues/3382
|
|
|
|
// Making sure we do up/down at same time? https://github.com/owncloud/client/issues/1633
|
|
|
|
|
|
|
|
if (_activeJobList.count() < maximumActiveJob()) {
|
2014-11-18 19:35:31 +03:00
|
|
|
if (_rootJob->scheduleNextJob()) {
|
2016-02-23 16:27:35 +03:00
|
|
|
QTimer::singleShot(0, this, SLOT(scheduleNextJob()));
|
2014-11-18 19:35:31 +03:00
|
|
|
}
|
2016-02-25 19:40:24 +03:00
|
|
|
} else if (_activeJobList.count() < hardMaximumActiveJob()) {
|
|
|
|
int likelyFinishedQuicklyCount = 0;
|
|
|
|
// NOTE: Only counts the first 3 jobs! Then for each
|
|
|
|
// one that is likely finished quickly, we can launch another one.
|
|
|
|
// When a job finishes another one will "move up" to be one of the first 3 and then
|
|
|
|
// be counted too.
|
|
|
|
for (int i = 0; i < maximumActiveJob() && i < _activeJobList.count(); i++) {
|
|
|
|
if (_activeJobList.at(i)->isLikelyFinishedQuickly()) {
|
|
|
|
likelyFinishedQuicklyCount++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (_activeJobList.count() < maximumActiveJob() + likelyFinishedQuicklyCount) {
|
2016-10-14 15:23:55 +03:00
|
|
|
qDebug() << "Can pump in another request! activeJobs =" << _activeJobList.count();
|
2016-02-25 19:40:24 +03:00
|
|
|
if (_rootJob->scheduleNextJob()) {
|
|
|
|
QTimer::singleShot(0, this, SLOT(scheduleNextJob()));
|
|
|
|
}
|
|
|
|
}
|
2014-11-18 19:35:31 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-12-18 14:09:48 +03:00
|
|
|
// Accessor for the account this propagator operates on.
AccountPtr OwncloudPropagator::account() const
{
    return _account;
}
|
|
|
|
|
2015-10-01 12:39:09 +03:00
|
|
|
// Classifies the free disk space of the local sync dir: Critical (stop
// syncing), Failure (refuse further downloads once the space already
// committed by pending jobs is accounted for), or Ok. When the free space
// cannot be determined (negative result) we optimistically report Ok.
OwncloudPropagator::DiskSpaceResult OwncloudPropagator::diskSpaceCheck() const
{
    const qint64 available = Utility::freeDiskSpace(_localDir);
    if (available >= 0) {
        if (available < criticalFreeSpaceLimit()) {
            return DiskSpaceCritical;
        }
        // Subtract the space pending jobs will still consume.
        if (available - _rootJob->committedDiskSpace() < freeSpaceLimit()) {
            return DiskSpaceFailure;
        }
    }
    return DiskSpaceOk;
}
|
|
|
|
|
2014-05-23 20:54:35 +04:00
|
|
|
// ================================================================================
|
2014-02-18 15:24:35 +04:00
|
|
|
|
2014-11-18 19:35:31 +03:00
|
|
|
// A directory job is fully parallel only while every non-finished child
// (the directory's own _firstJob and all sub jobs) is fully parallel.
PropagatorJob::JobParallelism PropagateDirectory::parallelism()
{
    // If any of the non-finished sub jobs is not parallel, we have to wait.
    // FIXME! we should probably cache this result

    if (_firstJob && _firstJob->_state != Finished
            && _firstJob->parallelism() != FullParallelism) {
        return WaitForFinished;
    }

    // FIXME: use the cached value of finished job
    foreach (PropagatorJob *subJob, _subJobs) {
        if (subJob->_state != Finished && subJob->parallelism() != FullParallelism) {
            return WaitForFinished;
        }
    }
    return FullParallelism;
}
|
|
|
|
|
|
|
|
|
|
|
|
// Tries to start the next runnable job inside this directory.
// Returns true if a job was started (or the empty directory finalized),
// false if nothing could be scheduled right now.
bool PropagateDirectory::scheduleNextJob()
{
    if (_state == Finished) {
        return false;
    }

    if (_state == NotYetStarted) {
        _state = Running;

        // Empty directory job: finish immediately.
        if (!_firstJob && _subJobs.isEmpty()) {
            finalize();
            return true;
        }
    }

    // _firstJob (the operation on the directory itself, e.g. mkdir) must
    // complete before any of the sub jobs may run.
    if (_firstJob && _firstJob->_state == NotYetStarted) {
        return possiblyRunNextJob(_firstJob.data());
    }

    if (_firstJob && _firstJob->_state == Running) {
        return false;
    }

    // cache the value of first unfinished subjob
    bool stopAtDirectory = false;
    int i = _firstUnfinishedSubJob;
    int subJobsCount = _subJobs.count();
    // Advance the cached index past the leading run of finished jobs so
    // later calls skip them in O(1).
    while (i < subJobsCount && _subJobs.at(i)->_state == Finished) {
        _firstUnfinishedSubJob = ++i;
    }

    for (int i = _firstUnfinishedSubJob; i < subJobsCount; ++i) {
        if (_subJobs.at(i)->_state == Finished) {
            continue;
        }

        // A running job asked to finish before entering further directories.
        if (stopAtDirectory && qobject_cast<PropagateDirectory*>(_subJobs.at(i))) {
            return false;
        }

        if (possiblyRunNextJob(_subJobs.at(i))) {
            return true;
        }

        // possiblyRunNextJob() only returns false for an already-running job.
        Q_ASSERT(_subJobs.at(i)->_state == Running);

        auto paral = _subJobs.at(i)->parallelism();
        if (paral == WaitForFinished) {
            // Nothing else may start until this job completes.
            return false;
        }
        if (paral == WaitForFinishedInParentDirectory) {
            // Later siblings may start, but not further directory jobs.
            stopAtDirectory = true;
        }
    }
    return false;
}
|
|
|
|
|
2014-02-06 15:11:45 +04:00
|
|
|
// Invoked whenever one of this directory's jobs (the first job or a sub
// job) emits finished(). Tracks completion counts and either aborts the
// whole directory, signals readiness for more scheduling, or finalizes.
void PropagateDirectory::slotSubJobFinished(SyncFileItem::Status status)
{
    // If the directory's own first job (mkdir/rename) did not succeed,
    // none of the children can proceed either.
    const bool firstJobFailed = sender() == _firstJob.data()
            && status != SyncFileItem::Success
            && status != SyncFileItem::Restoration;

    if (status == SyncFileItem::FatalError || firstJobFailed) {
        abort();
        _state = Finished;
        emit finished(status);
        return;
    }

    if (status == SyncFileItem::NormalError || status == SyncFileItem::SoftError) {
        // Remember the error so finalize() reports it, but keep going.
        _hasError = status;
    }

    --_runningNow;
    ++_jobsFinished;

    // The first job counts toward the total alongside the sub jobs.
    const int totalJobs = _subJobs.count() + (_firstJob ? 1 : 0);

    // We finished processing all the jobs
    // check if we finished
    if (_jobsFinished >= totalJobs) {
        Q_ASSERT(!_runningNow); // how can we be finished if there are still jobs running now
        finalize();
    } else {
        emit ready();
    }
}
|
|
|
|
|
|
|
|
// Called once all jobs of this directory have finished (or immediately,
// for an empty directory). Updates the sync journal for the directory
// itself and emits finished() with the accumulated status.
// NOTE(review): statement order matters here — the stale-record delete
// must happen before _item->_file is rewritten to the rename target.
void PropagateDirectory::finalize()
{
    bool ok = true;
    // Only touch the journal if this directory item carries real work and
    // no sub job reported an error.
    if (!_item->isEmpty() && _hasError == SyncFileItem::NoStatus) {
        if( !_item->_renameTarget.isEmpty() ) {
            if(_item->_instruction == CSYNC_INSTRUCTION_RENAME
                && _item->_originalFile != _item->_renameTarget) {
                // Remove the stale entries from the database.
                _propagator->_journal->deleteFileRecord(_item->_originalFile, true);
            }

            // From here on the item is known by its new name.
            _item->_file = _item->_renameTarget;
        }

        // For new directories we always want to update the etag once
        // the directory has been propagated. Otherwise the directory
        // could appear locally without being added to the database.
        if (_item->_instruction == CSYNC_INSTRUCTION_RENAME
            || _item->_instruction == CSYNC_INSTRUCTION_NEW
            || _item->_instruction == CSYNC_INSTRUCTION_UPDATE_METADATA) {
            if (PropagateRemoteMkdir* mkdir = qobject_cast<PropagateRemoteMkdir*>(_firstJob.data())) {
                // special case from MKDIR, get the fileId from the job there
                if (_item->_fileId.isEmpty() && !mkdir->_item->_fileId.isEmpty()) {
                    _item->_fileId = mkdir->_item->_fileId;
                }
            }
            SyncJournalFileRecord record(*_item, _propagator->_localDir + _item->_file);
            ok = _propagator->_journal->setFileRecordMetadata(record);
            if (!ok) {
                // A failed journal write is fatal: the local state would
                // diverge from the database on the next sync run.
                _hasError = _item->_status = SyncFileItem::FatalError;
                _item->_errorString = tr("Error writing metadata to the database");
                qWarning() << "Error writing to the database for file" << _item->_file;
            }
        }
    }
    _state = Finished;
    // Report Success only if no sub job recorded an error above.
    emit finished(_hasError == SyncFileItem::NoStatus ? SyncFileItem::Success : _hasError);
}
|
2013-05-03 21:11:00 +04:00
|
|
|
|
2015-10-01 12:39:09 +03:00
|
|
|
// Returns the total disk space (in bytes) the sub jobs of this directory
// still intend to claim, by summing committedDiskSpace() recursively.
qint64 PropagateDirectory::committedDiskSpace() const
{
    qint64 total = 0;
    for (int idx = 0; idx < _subJobs.count(); ++idx) {
        total += _subJobs.at(idx)->committedDiskSpace();
    }
    return total;
}
|
|
|
|
|
2015-03-27 13:11:44 +03:00
|
|
|
// Empty destructor, defined out of line in this translation unit
// (rather than inline in the header).
CleanupPollsJob::~CleanupPollsJob()
{}
|
|
|
|
|
2014-07-28 14:12:52 +04:00
|
|
|
void CleanupPollsJob::start()
|
|
|
|
{
|
|
|
|
if (_pollInfos.empty()) {
|
|
|
|
emit finished();
|
|
|
|
deleteLater();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2014-11-11 12:10:46 +03:00
|
|
|
auto info = _pollInfos.first();
|
|
|
|
_pollInfos.pop_front();
|
2016-04-11 13:41:26 +03:00
|
|
|
SyncJournalFileRecord record = _journal->getFileRecord(info._file);
|
|
|
|
SyncFileItemPtr item(new SyncFileItem(record.toSyncFileItem()));
|
|
|
|
if (record.isValid()) {
|
|
|
|
PollJob *job = new PollJob(_account, info._url, item, _journal, _localPath, this);
|
|
|
|
connect(job, SIGNAL(finishedSignal()), SLOT(slotPollFinished()));
|
|
|
|
job->start();
|
|
|
|
}
|
2014-07-28 14:12:52 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
void CleanupPollsJob::slotPollFinished()
|
|
|
|
{
|
|
|
|
PollJob *job = qobject_cast<PollJob *>(sender());
|
|
|
|
Q_ASSERT(job);
|
2015-04-15 16:19:11 +03:00
|
|
|
if (job->_item->_status == SyncFileItem::FatalError) {
|
|
|
|
emit aborted(job->_item->_errorString);
|
2016-07-14 10:21:28 +03:00
|
|
|
deleteLater();
|
2014-07-29 17:51:22 +04:00
|
|
|
return;
|
2015-04-15 16:19:11 +03:00
|
|
|
} else if (job->_item->_status != SyncFileItem::Success) {
|
|
|
|
qDebug() << "There was an error with file " << job->_item->_file << job->_item->_errorString;
|
2014-07-28 14:12:52 +04:00
|
|
|
} else {
|
2016-04-07 12:47:04 +03:00
|
|
|
if (!_journal->setFileRecord(SyncJournalFileRecord(*job->_item, _localPath + job->_item->_file))) {
|
|
|
|
qWarning() << "database error";
|
|
|
|
job->_item->_status = SyncFileItem::FatalError;
|
|
|
|
job->_item->_errorString = tr("Error writing metadata to the database");
|
|
|
|
emit aborted(job->_item->_errorString);
|
2016-07-14 10:21:28 +03:00
|
|
|
deleteLater();
|
2016-04-07 12:47:04 +03:00
|
|
|
return;
|
|
|
|
}
|
2014-07-28 14:12:52 +04:00
|
|
|
}
|
|
|
|
// Continue with the next entry, or finish
|
|
|
|
start();
|
|
|
|
}
|
|
|
|
|
2013-05-04 18:12:51 +04:00
|
|
|
}
|