2013-05-03 21:11:00 +04:00
|
|
|
/*
|
|
|
|
* Copyright (C) by Olivier Goffart <ogoffart@owncloud.com>
|
|
|
|
* Copyright (C) by Klaas Freitag <freitag@owncloud.com>
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License as published by
|
|
|
|
* the Free Software Foundation; either version 2 of the License, or
|
|
|
|
* (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful, but
|
|
|
|
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
|
|
|
|
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
|
|
* for more details.
|
|
|
|
*/
|
|
|
|
|
2014-02-18 14:52:38 +04:00
|
|
|
#include "owncloudpropagator.h"
|
2013-10-16 13:59:54 +04:00
|
|
|
#include "syncjournaldb.h"
|
2013-10-28 13:47:10 +04:00
|
|
|
#include "syncjournalfilerecord.h"
|
2014-11-11 14:16:14 +03:00
|
|
|
#include "propagatedownload.h"
|
|
|
|
#include "propagateupload.h"
|
2014-11-11 15:19:29 +03:00
|
|
|
#include "propagateremotedelete.h"
|
2014-11-11 18:09:01 +03:00
|
|
|
#include "propagateremotemove.h"
|
2014-11-13 20:57:07 +03:00
|
|
|
#include "propagateremotemkdir.h"
|
2014-02-18 14:52:38 +04:00
|
|
|
#include "propagatorjobs.h"
|
2015-01-23 19:09:48 +03:00
|
|
|
#ifdef USE_NEON
|
2014-02-18 14:52:38 +04:00
|
|
|
#include "propagator_legacy.h"
|
2015-01-23 19:09:48 +03:00
|
|
|
#endif
|
2014-11-10 01:25:57 +03:00
|
|
|
#include "configfile.h"
|
2014-07-11 02:31:24 +04:00
|
|
|
#include "utility.h"
|
2015-03-27 13:11:44 +03:00
|
|
|
#include "account.h"
|
2014-07-28 14:12:52 +04:00
|
|
|
#include <json.h>
|
2013-05-03 21:11:00 +04:00
|
|
|
|
2014-05-23 20:54:35 +04:00
|
|
|
#ifdef Q_OS_WIN
|
|
|
|
#include <windef.h>
|
|
|
|
#include <winbase.h>
|
|
|
|
#endif
|
|
|
|
|
2014-02-18 14:52:38 +04:00
|
|
|
#include <QStack>
|
2014-05-26 19:36:52 +04:00
|
|
|
#include <QFileInfo>
|
2014-07-07 15:00:38 +04:00
|
|
|
#include <QDir>
|
2014-09-29 12:30:39 +04:00
|
|
|
#include <QTimer>
|
|
|
|
#include <QObject>
|
|
|
|
#include <QTimerEvent>
|
2015-03-30 09:41:37 +03:00
|
|
|
#include <QDebug>
|
2013-05-03 21:11:00 +04:00
|
|
|
|
2014-11-10 00:34:07 +03:00
|
|
|
namespace OCC {
|
2013-12-10 20:19:25 +04:00
|
|
|
|
2015-03-27 13:11:44 +03:00
|
|
|
// Out-of-line destructor; defined here (rather than in the header) so that
// destructors of members whose types may only be forward-declared in the
// header can be generated in this translation unit.
OwncloudPropagator::~OwncloudPropagator()
{}
|
|
|
|
|
2014-02-12 14:07:34 +04:00
|
|
|
/* The maximum number of active job in parallel */
|
2014-09-15 19:55:55 +04:00
|
|
|
int OwncloudPropagator::maximumActiveJob()
|
|
|
|
{
|
2014-03-28 14:20:07 +04:00
|
|
|
static int max = qgetenv("OWNCLOUD_MAX_PARALLEL").toUInt();
|
|
|
|
if (!max) {
|
|
|
|
max = 3; //default
|
|
|
|
}
|
|
|
|
return max;
|
|
|
|
}
|
2014-02-12 14:07:34 +04:00
|
|
|
|
2014-10-09 16:49:51 +04:00
|
|
|
/** Updates or creates a blacklist entry for the given item.
|
|
|
|
*
|
|
|
|
* Returns whether the file is in the blacklist now.
|
|
|
|
*/
|
|
|
|
static bool blacklist(SyncJournalDb* journal, const SyncFileItem& item)
|
|
|
|
{
|
2015-01-16 12:17:19 +03:00
|
|
|
SyncJournalErrorBlacklistRecord oldEntry = journal->errorBlacklistEntry(item._file);
|
|
|
|
SyncJournalErrorBlacklistRecord newEntry = SyncJournalErrorBlacklistRecord::update(oldEntry, item);
|
2014-10-09 16:49:51 +04:00
|
|
|
|
|
|
|
if (newEntry.isValid()) {
|
2015-01-16 12:17:19 +03:00
|
|
|
journal->updateErrorBlacklistEntry(newEntry);
|
2014-10-09 16:49:51 +04:00
|
|
|
} else if (oldEntry.isValid()) {
|
2015-01-16 12:17:19 +03:00
|
|
|
journal->wipeErrorBlacklistEntry(item._file);
|
2014-10-09 16:49:51 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
return newEntry.isValid();
|
|
|
|
}
|
|
|
|
|
2013-11-20 16:44:01 +04:00
|
|
|
// Finish this propagation job: adjust the final status (restoration mapping,
// abort downgrade, blacklist handling), record it on the item, and notify
// listeners via completed()/finished().
void PropagateItemJob::done(SyncFileItem::Status status, const QString &errorString)
{
    _state = Finished;
    if (_item._isRestoration) {
        // A successful restore (including a conflict copy) is reported as
        // "Restoration"; a failed one appends the error to the item.
        if( status == SyncFileItem::Success || status == SyncFileItem::Conflict) {
            status = SyncFileItem::Restoration;
        } else {
            _item._errorString += tr("; Restoration Failed: %1").arg(errorString);
        }
    } else {
        // Keep the first recorded error; don't overwrite an existing one.
        if( _item._errorString.isEmpty() ) {
            _item._errorString = errorString;
        }
    }

    // fetchAndAddRelaxed(0) is an atomic read of the abort flag.
    if( _propagator->_abortRequested.fetchAndAddRelaxed(0) &&
        (status == SyncFileItem::NormalError || status == SyncFileItem::FatalError)) {
        // an abort request is ongoing. Change the status to Soft-Error
        status = SyncFileItem::SoftError;
    }

    switch( status ) {
    case SyncFileItem::SoftError:
    case SyncFileItem::FatalError:
        // do not blacklist in case of soft error or fatal error.
        break;
    case SyncFileItem::NormalError:
        if (blacklist(_propagator->_journal, _item) && _item._hasBlacklistEntry) {
            // do not error if the item was, and continues to be, blacklisted
            status = SyncFileItem::FileIgnored;
            _item._errorString.prepend(tr("Continue blacklisting:") + " ");
        }
        break;
    case SyncFileItem::Success:
    case SyncFileItem::Restoration:
        if( _item._hasBlacklistEntry ) {
            // wipe blacklist entry.
            _propagator->_journal->wipeErrorBlacklistEntry(_item._file);
            // remove a blacklist entry in case the file was moved.
            if( _item._originalFile != _item._file ) {
                _propagator->_journal->wipeErrorBlacklistEntry(_item._originalFile);
            }
        }
        break;
    case SyncFileItem::Conflict:
    case SyncFileItem::FileIgnored:
    case SyncFileItem::NoStatus:
        // nothing
        break;
    }

    _item._status = status;

    emit completed(_item);
    emit finished(status);
}
|
|
|
|
|
2014-02-12 16:44:55 +04:00
|
|
|
/**
 * For delete or remove, check that we are not removing from a shared directory.
 * If we are, try to restore the file.
 *
 * Return true if the problem is handled.
 */
bool PropagateItemJob::checkForProblemsWithShared(int httpStatusCode, const QString& msg)
{
    PropagateItemJob *newJob = NULL;

    // A 403 inside a shared directory means the server refused the change
    // (read-only share): schedule a restore job to undo the local change.
    if( httpStatusCode == 403 && _propagator->isInSharedDirectory(_item._file )) {
        if( !_item._isDirectory ) {
            SyncFileItem downloadItem(_item);
            if (downloadItem._instruction == CSYNC_INSTRUCTION_NEW) {
                // don't try to recover pushing new files
                return false;
            } else if (downloadItem._instruction == CSYNC_INSTRUCTION_SYNC) {
                // we modified the file locally, just create a conflict then
                downloadItem._instruction = CSYNC_INSTRUCTION_CONFLICT;

                // HACK to avoid continuation: See task #1448: We do not know the _modtime from the
                // server, at this point, so just set the current one. (rather than the one locally)
                downloadItem._modtime = Utility::qDateTimeToTime_t(QDateTime::currentDateTime());
            } else {
                // the file was removed or renamed, just recover the old one
                downloadItem._instruction = CSYNC_INSTRUCTION_SYNC;
            }
            // Restore means downloading the server copy again.
            downloadItem._direction = SyncFileItem::Down;
#ifdef USE_NEON
            newJob = new PropagateDownloadFileLegacy(_propagator, downloadItem);
#else
            newJob = new PropagateDownloadFileQNAM(_propagator, downloadItem);
#endif
        } else {
            // Directories are harder to recover.
            // But just re-create the directory, next sync will be able to recover the files
            SyncFileItem mkdirItem(_item);
            mkdirItem._instruction = CSYNC_INSTRUCTION_SYNC;
            mkdirItem._direction = SyncFileItem::Down;
            newJob = new PropagateLocalMkdir(_propagator, mkdirItem);
            // Also remove the inodes and fileid from the db so no further renames are tried for
            // this item.
            _propagator->_journal->avoidRenamesOnNextSync(_item._file);
            _propagator->_anotherSyncNeeded = true;
        }
        if( newJob ) {
            newJob->setRestoreJobMsg(msg);
            _restoreJob.reset(newJob);
            connect(_restoreJob.data(), SIGNAL(completed(SyncFileItem)),
                    this, SLOT(slotRestoreJobCompleted(SyncFileItem)));
            // Start asynchronously through the event loop.
            QMetaObject::invokeMethod(newJob, "start");
        }
        return true;
    }
    return false;
}
|
|
|
|
|
|
|
|
void PropagateItemJob::slotRestoreJobCompleted(const SyncFileItem& item )
|
|
|
|
{
|
2014-02-19 20:21:01 +04:00
|
|
|
QString msg;
|
|
|
|
if(_restoreJob) {
|
|
|
|
msg = _restoreJob->restoreJobMsg();
|
|
|
|
_restoreJob->setRestoreJobMsg();
|
|
|
|
}
|
|
|
|
|
2014-06-23 15:56:17 +04:00
|
|
|
if( item._status == SyncFileItem::Success || item._status == SyncFileItem::Conflict
|
|
|
|
|| item._status == SyncFileItem::Restoration) {
|
2014-02-19 20:21:01 +04:00
|
|
|
done( SyncFileItem::SoftError, msg);
|
2014-02-04 18:01:10 +04:00
|
|
|
} else {
|
2014-02-19 20:21:01 +04:00
|
|
|
done( item._status, tr("A file or directory was removed from a read only share, but restoring failed: %1").arg(item._errorString) );
|
2014-02-04 18:01:10 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-05-23 20:54:35 +04:00
|
|
|
// ================================================================================
|
|
|
|
|
2013-10-28 13:47:10 +04:00
|
|
|
// Factory: maps a sync item's csync instruction and direction to the concrete
// propagation job. Returns 0 when no job is needed (e.g. directory content
// sync) or for unknown instructions.
PropagateItemJob* OwncloudPropagator::createJob(const SyncFileItem& item) {
    switch(item._instruction) {
    case CSYNC_INSTRUCTION_REMOVE:
        if (item._direction == SyncFileItem::Down) return new PropagateLocalRemove(this, item);
        else return new PropagateRemoteDelete(this, item);
    case CSYNC_INSTRUCTION_NEW:
        if (item._isDirectory) {
            if (item._direction == SyncFileItem::Down) return new PropagateLocalMkdir(this, item);
            else return new PropagateRemoteMkdir(this, item);
        } // fall through: a new file is handled like a sync below
    case CSYNC_INSTRUCTION_SYNC:
    case CSYNC_INSTRUCTION_CONFLICT:
        if (item._isDirectory) {
            // Should we set the mtime?
            return 0;
        }
#ifdef USE_NEON
        if (useLegacyJobs()) {
            if (item._direction != SyncFileItem::Up) {
                return new PropagateDownloadFileLegacy(this, item);
            } else {
                return new PropagateUploadFileLegacy(this, item);
            }
        } else
#endif
        {
            if (item._direction != SyncFileItem::Up) {
                return new PropagateDownloadFileQNAM(this, item);
            } else {
                return new PropagateUploadFileQNAM(this, item);
            }
        }
    case CSYNC_INSTRUCTION_RENAME:
        if (item._direction == SyncFileItem::Up) {
            return new PropagateRemoteMove(this, item);
        } else {
            return new PropagateLocalRename(this, item);
        }
    case CSYNC_INSTRUCTION_IGNORE:
    case CSYNC_INSTRUCTION_ERROR:
        return new PropagateIgnoreJob(this, item);
    default:
        return 0;
    }
    return 0;
}
|
2013-08-14 21:59:16 +04:00
|
|
|
|
2014-06-27 15:34:15 +04:00
|
|
|
// Build the whole propagation job tree from the (destination-sorted) item
// list, wire up the root job's signals, and kick off scheduling.
void OwncloudPropagator::start(const SyncFileItemVector& items)
{
    Q_ASSERT(std::is_sorted(items.begin(), items.end()));

    /* This builds all the jobs needed for the propagation.
     * Each directory is a PropagateDirectory job, which contains the files in it.
     * In order to do that we loop over the items (which are sorted by destination).
     * When we enter a directory, we can create the directory job and push it on the stack. */

    _rootJob.reset(new PropagateDirectory(this));
    QStack<QPair<QString /* directory name */, PropagateDirectory* /* job */> > directories;
    directories.push(qMakePair(QString(), _rootJob.data()));
    QVector<PropagatorJob*> directoriesToRemove;
    QString removedDirectory;
    foreach(const SyncFileItem &item, items) {

        if (!removedDirectory.isEmpty() && item._file.startsWith(removedDirectory)) {
            // this is an item in a directory which is going to be removed.
            PropagateDirectory *delDirJob = dynamic_cast<PropagateDirectory*>(directoriesToRemove.last());

            if (item._instruction == CSYNC_INSTRUCTION_REMOVE) {
                // already taken care of (by the removal of the parent directory).

                // increase the number of subjobs that would be there.
                if( delDirJob ) {
                    delDirJob->increaseAffectedCount();
                }
                continue;
            } else if (item._instruction == CSYNC_INSTRUCTION_NEW && item._isDirectory) {
                // create a new directory within a deleted directory? That can happen if the directory
                // etag were not fetched properly on the previous sync because the sync was aborted
                // while uploading this directory (which is now removed). We can ignore it.
                if( delDirJob ) {
                    delDirJob->increaseAffectedCount();
                }
                continue;
            } else if (item._instruction == CSYNC_INSTRUCTION_IGNORE) {
                continue;
            }

            qWarning() << "WARNING: Job within a removed directory? This should not happen!"
                       << item._file << item._instruction;
        }

        // Pop directories off the stack until we are back at an ancestor of
        // the current item's destination.
        while (!item.destination().startsWith(directories.top().first)) {
            directories.pop();
        }

        if (item._isDirectory) {
            PropagateDirectory *dir = new PropagateDirectory(this, item);
            dir->_firstJob.reset(createJob(item));
            if (item._instruction == CSYNC_INSTRUCTION_REMOVE) {
                // We do the removal of directories at the end, because there might be moves from
                // these directories that will happen later.
                directoriesToRemove.append(dir);
                removedDirectory = item._file + "/";

                // We should not update the etag of parent directories of the removed directory
                // since it would be done before the actual remove (issue #1845)
                // NOTE: Currently this means that we don't update those etags at all in this sync,
                // but it should not be a problem, they will be updated in the next sync.
                for (int i = 0; i < directories.size(); ++i) {
                    directories[i].second->_item._should_update_etag = false;
                }
            } else {
                PropagateDirectory* currentDirJob = directories.top().second;
                currentDirJob->append(dir);
            }
            directories.push(qMakePair(item.destination() + "/" , dir));
        } else if (PropagateItemJob* current = createJob(item)) {
            directories.top().second->append(current);
        }
    }

    // Removals run last, attached directly to the root job.
    foreach(PropagatorJob* it, directoriesToRemove) {
        _rootJob->append(it);
    }

    connect(_rootJob.data(), SIGNAL(completed(SyncFileItem)), this, SIGNAL(completed(SyncFileItem)));
    connect(_rootJob.data(), SIGNAL(progress(SyncFileItem,quint64)), this, SIGNAL(progress(SyncFileItem,quint64)));
    connect(_rootJob.data(), SIGNAL(finished(SyncFileItem::Status)), this, SLOT(emitFinished()));
    connect(_rootJob.data(), SIGNAL(ready()), this, SLOT(scheduleNextJob()), Qt::QueuedConnection);

    qDebug() << (useLegacyJobs() ? "Using legacy libneon/HTTP sequential code path" : "Using QNAM/HTTP parallel code path");

    // Start scheduling from the event loop.
    QTimer::singleShot(0, this, SLOT(scheduleNextJob()));
}
|
2013-05-03 21:11:00 +04:00
|
|
|
|
2014-02-04 18:01:10 +04:00
|
|
|
bool OwncloudPropagator::isInSharedDirectory(const QString& file)
|
|
|
|
{
|
|
|
|
bool re = false;
|
|
|
|
if( _remoteDir.contains("remote.php/webdav/Shared") ) {
|
|
|
|
// The Shared directory is synced as its own sync connection
|
|
|
|
re = true;
|
|
|
|
} else {
|
2014-02-19 18:23:36 +04:00
|
|
|
if( file.startsWith("Shared/") || file == "Shared" ) {
|
2014-02-04 18:01:10 +04:00
|
|
|
// The whole ownCloud is synced and Shared is always a top dir
|
|
|
|
re = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return re;
|
|
|
|
}
|
|
|
|
|
2014-02-18 15:24:35 +04:00
|
|
|
/**
 * Return true if we should use the legacy jobs.
 * Some features are not supported by QNAM and therefore we still use the legacy jobs
 * for this case.
 */
bool OwncloudPropagator::useLegacyJobs()
{
#ifdef USE_NEON
    // Allow an environment variable for debugging
    QByteArray env = qgetenv("OWNCLOUD_USE_LEGACY_JOBS");
    if (env=="true" || env =="1") {
        qDebug() << "Force Legacy Propagator ACTIVATED";
        return true;
    }

    // fetchAndAddAcquire(0) is an atomic read of the bandwidth limits.
    if (_downloadLimit.fetchAndAddAcquire(0) != 0 || _uploadLimit.fetchAndAddAcquire(0) != 0) {
        // QNAM bandwidth limiting only works with versions of Qt greater or equal to 5.3.3
        // (It needs Qt commits 097b641 and b99fa32)
#if QT_VERSION >= QT_VERSION_CHECK(5,3,3)
        return false;
#elif QT_VERSION >= QT_VERSION_CHECK(5,0,0)
        // Built against an older Qt 5: allow opting in, otherwise decide at runtime.
        env = qgetenv("OWNCLOUD_NEW_BANDWIDTH_LIMITING");
        if (env=="true" || env =="1") {
            qDebug() << "New Bandwidth Limiting Code ACTIVATED";
            return false;
        }

        // Do a runtime check.
        // (Poor man's version comparison)
        const char *v = qVersion(); // "x.y.z";
        if (QLatin1String(v) >= QLatin1String("5.3.3")) {
            return false;
        } else {
            qDebug() << "Use legacy jobs because qt version is only" << v << "while 5.3.3 is needed";
            return true;
        }
#else
        qDebug() << "Use legacy jobs because of Qt4";
        return true;
#endif
    }
#endif // USE_NEON
    return false;
}
|
|
|
|
|
2014-05-23 20:54:35 +04:00
|
|
|
int OwncloudPropagator::httpTimeout()
|
|
|
|
{
|
|
|
|
static int timeout;
|
|
|
|
if (!timeout) {
|
|
|
|
timeout = qgetenv("OWNCLOUD_TIMEOUT").toUInt();
|
|
|
|
if (timeout == 0) {
|
2014-11-10 00:30:29 +03:00
|
|
|
ConfigFile cfg;
|
2014-05-28 18:28:22 +04:00
|
|
|
timeout = cfg.timeout();
|
2014-05-23 20:54:35 +04:00
|
|
|
}
|
2014-05-28 18:28:22 +04:00
|
|
|
|
2014-05-23 20:54:35 +04:00
|
|
|
}
|
|
|
|
return timeout;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool OwncloudPropagator::localFileNameClash( const QString& relFile )
|
|
|
|
{
|
|
|
|
bool re = false;
|
|
|
|
const QString file( _localDir + relFile );
|
2014-05-26 19:36:52 +04:00
|
|
|
|
2014-05-26 19:52:24 +04:00
|
|
|
if( !file.isEmpty() && Utility::fsCasePreserving() ) {
|
|
|
|
#ifdef Q_OS_MAC
|
|
|
|
QFileInfo fileInfo(file);
|
2014-05-30 17:46:51 +04:00
|
|
|
if (!fileInfo.exists()) {
|
2014-05-26 19:52:24 +04:00
|
|
|
re = false;
|
2014-07-23 19:54:12 +04:00
|
|
|
qDebug() << Q_FUNC_INFO << "No valid fileinfo";
|
2014-05-30 17:46:51 +04:00
|
|
|
} else {
|
2014-07-23 19:54:12 +04:00
|
|
|
// Need to normalize to composited form because of
|
|
|
|
// https://bugreports.qt-project.org/browse/QTBUG-39622
|
|
|
|
const QString cName = fileInfo.canonicalFilePath().normalized(QString::NormalizationForm_C);
|
|
|
|
// qDebug() << Q_FUNC_INFO << "comparing " << cName << " with " << file;
|
|
|
|
bool equal = (file == cName);
|
|
|
|
re = (!equal && ! cName.endsWith(relFile, Qt::CaseSensitive) );
|
|
|
|
// qDebug() << Q_FUNC_INFO << "Returning for localFileNameClash: " << re;
|
2014-05-30 17:46:51 +04:00
|
|
|
}
|
2014-05-26 19:52:24 +04:00
|
|
|
#elif defined(Q_OS_WIN)
|
|
|
|
const QString file( _localDir + relFile );
|
|
|
|
qDebug() << "CaseClashCheck for " << file;
|
|
|
|
WIN32_FIND_DATA FindFileData;
|
|
|
|
HANDLE hFind;
|
|
|
|
|
|
|
|
hFind = FindFirstFileW( (wchar_t*)file.utf16(), &FindFileData);
|
|
|
|
if (hFind == INVALID_HANDLE_VALUE) {
|
2014-06-02 21:38:04 +04:00
|
|
|
//qDebug() << "FindFirstFile failed " << GetLastError();
|
2014-05-26 19:52:24 +04:00
|
|
|
// returns false.
|
|
|
|
} else {
|
|
|
|
QString realFileName = QString::fromWCharArray( FindFileData.cFileName );
|
|
|
|
FindClose(hFind);
|
2014-05-23 20:54:35 +04:00
|
|
|
|
2014-05-26 19:52:24 +04:00
|
|
|
if( ! file.endsWith(realFileName, Qt::CaseSensitive) ) {
|
2014-10-06 17:39:49 +04:00
|
|
|
qDebug() << Q_FUNC_INFO << "Detected case clash between" << file << "and" << realFileName;
|
2014-05-26 19:52:24 +04:00
|
|
|
re = true;
|
|
|
|
}
|
2014-05-23 20:54:35 +04:00
|
|
|
}
|
2014-07-07 15:00:38 +04:00
|
|
|
#else
|
|
|
|
// On Linux, the file system is case sensitive, but this code is usefull for testing.
|
|
|
|
// Just check that there is no other file with the same name and different casing.
|
|
|
|
QFileInfo fileInfo(file);
|
|
|
|
const QString fn = fileInfo.fileName();
|
|
|
|
QStringList list = fileInfo.dir().entryList(QStringList() << fn);
|
|
|
|
if (list.count() > 1 || (list.count() == 1 && list[0] != fn)) {
|
|
|
|
re = true;
|
|
|
|
}
|
2014-05-23 20:54:35 +04:00
|
|
|
#endif
|
2014-05-26 19:52:24 +04:00
|
|
|
}
|
2014-05-23 20:54:35 +04:00
|
|
|
return re;
|
|
|
|
}
|
|
|
|
|
2014-09-03 14:11:03 +04:00
|
|
|
// Builds the absolute local path for a path relative to the sync root.
QString OwncloudPropagator::getFilePath(const QString& tmp_file_name) const
{
    const QString absolutePath = _localDir + tmp_file_name;
    return absolutePath;
}
|
|
|
|
|
2014-11-18 19:35:31 +03:00
|
|
|
void OwncloudPropagator::scheduleNextJob()
|
|
|
|
{
|
|
|
|
if (this->_activeJobs < maximumActiveJob()) {
|
|
|
|
if (_rootJob->scheduleNextJob()) {
|
|
|
|
QTimer::singleShot(100, this, SLOT(scheduleNextJob()));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-11-07 13:41:21 +03:00
|
|
|
void OwncloudPropagator::addTouchedFile(const QString& fn)
|
|
|
|
{
|
|
|
|
QString file = QDir::cleanPath(fn);
|
|
|
|
|
|
|
|
QElapsedTimer timer;
|
|
|
|
timer.start();
|
|
|
|
|
|
|
|
QMutexLocker lock(&_touchedFilesMutex);
|
|
|
|
_touchedFiles.insert(file, timer);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Milliseconds since addTouchedFile() recorded this (cleaned) path, or -1 if
// the path was never recorded. Thread-safe via _touchedFilesMutex.
qint64 OwncloudPropagator::timeSinceFileTouched(const QString& fn) const
{
    QMutexLocker lock(&_touchedFilesMutex);
    // Single lookup instead of contains() followed by operator[].
    const auto it = _touchedFiles.constFind(fn);
    if (it == _touchedFiles.constEnd()) {
        return -1;
    }
    return it.value().elapsed();
}
|
2014-11-18 19:35:31 +03:00
|
|
|
|
2014-12-18 14:09:48 +03:00
|
|
|
// Accessor for the account this propagator operates on.
AccountPtr OwncloudPropagator::account() const
{
    return _account;
}
|
|
|
|
|
2014-05-23 20:54:35 +04:00
|
|
|
// ================================================================================
|
2014-02-18 15:24:35 +04:00
|
|
|
|
2014-11-18 19:35:31 +03:00
|
|
|
// A directory can run with full parallelism only if every not-yet-finished
// sub job (including the first job) allows it; otherwise the caller must
// wait for the running jobs to finish.
// FIXME: this result should probably be cached.
PropagatorJob::JobParallelism PropagateDirectory::parallelism()
{
    if (_firstJob && _firstJob->_state != Finished
            && _firstJob->parallelism() != FullParallelism) {
        return WaitForFinished;
    }

    // FIXME: use the cached value of finished jobs
    for (int idx = 0; idx < _subJobs.count(); ++idx) {
        PropagatorJob *sub = _subJobs.at(idx);
        if (sub->_state == Finished) {
            continue;
        }
        if (sub->parallelism() != FullParallelism) {
            return WaitForFinished;
        }
    }
    return FullParallelism;
}
|
|
|
|
|
|
|
|
|
|
|
|
// Try to start one not-yet-running job in this directory tree.
// Returns true when a job was started (or the empty directory finalized),
// false when nothing could be scheduled right now.
bool PropagateDirectory::scheduleNextJob()
{
    if (_state == Finished) {
        return false;
    }

    if (_state == NotYetStarted) {
        _state = Running;

        // Nothing to do at all: finish immediately.
        if (!_firstJob && _subJobs.isEmpty()) {
            finalize();
            return true;
        }
    }

    // The first job (e.g. the mkdir/remove for this directory itself) must
    // complete before any sub job may run.
    if (_firstJob && _firstJob->_state == NotYetStarted) {
        return possiblyRunNextJob(_firstJob.data());
    }

    if (_firstJob && _firstJob->_state == Running) {
        return false;
    }

    bool stopAtDirectory = false;
    // FIXME: use the cached value of finished job
    for (int i = 0; i < _subJobs.count(); ++i) {
        if (_subJobs.at(i)->_state == Finished) {
            continue;
        }

        // A previous job demanded WaitForFinishedInParentDirectory: do not
        // descend into further sub directories until it is done.
        if (stopAtDirectory && qobject_cast<PropagateDirectory*>(_subJobs.at(i))) {
            return false;
        }

        if (possiblyRunNextJob(_subJobs.at(i))) {
            return true;
        }

        // possiblyRunNextJob() returned false for a non-finished job, so it
        // must already be running.
        Q_ASSERT(_subJobs.at(i)->_state == Running);

        auto paral = _subJobs.at(i)->parallelism();
        if (paral == WaitForFinished) {
            return false;
        }
        if (paral == WaitForFinishedInParentDirectory) {
            stopAtDirectory = true;
        }
    }
    return false;
}
|
|
|
|
|
2014-02-06 15:11:45 +04:00
|
|
|
// Called when one of this directory's jobs finishes. A fatal error (or any
// failure of the first job) aborts the whole directory; otherwise progress is
// counted and the directory finalizes once every job has completed.
void PropagateDirectory::slotSubJobFinished(SyncFileItem::Status status)
{
    if (status == SyncFileItem::FatalError ||
        (sender() == _firstJob.data() && status != SyncFileItem::Success && status != SyncFileItem::Restoration)) {
        abort();
        _state = Finished;
        emit finished(status);
        return;
    } else if (status == SyncFileItem::NormalError || status == SyncFileItem::SoftError) {
        // Remember the (last) non-fatal error to report in finalize().
        _hasError = status;
    }
    _runningNow--;

    // Total number of jobs to wait for. NOTE(review): the -1 when there is no
    // _firstJob only balances out if _current starts at -1 — confirm against
    // the member's initializer in the header.
    int total = _subJobs.count();
    if (!_firstJob) {
        total--;
    }

    _current++;

    // Check whether all jobs have now been processed; if not, signal that
    // another job may be scheduled.
    if (_current >= total) {
        Q_ASSERT(!_runningNow); // how can we be finished if there are still jobs running now
        finalize();
    } else {
        emit ready();
    }
}
|
|
|
|
|
|
|
|
// Complete this directory job: on success, update the journal record for the
// directory (honoring renames and the etag-update suppression flag), then mark
// the job finished and emit the aggregate status.
void PropagateDirectory::finalize()
{
    if (!_item.isEmpty() && _hasError == SyncFileItem::NoStatus) {
        // If the directory was renamed, record it under its new name.
        if( !_item._renameTarget.isEmpty() ) {
            _item._file = _item._renameTarget;
        }

        if (_item._should_update_etag && _item._instruction != CSYNC_INSTRUCTION_REMOVE) {
            if (PropagateRemoteMkdir* mkdir = qobject_cast<PropagateRemoteMkdir*>(_firstJob.data())) {
                // special case from MKDIR, get the fileId from the job there
                if (_item._fileId.isEmpty() && !mkdir->_item._fileId.isEmpty()) {
                    _item._fileId = mkdir->_item._fileId;
                }
            }
            SyncJournalFileRecord record(_item, _propagator->_localDir + _item._file);
            _propagator->_journal->setFileRecord(record);
        }
    }
    _state = Finished;
    emit finished(_hasError == SyncFileItem::NoStatus ? SyncFileItem::Success : _hasError);
}
|
2013-05-03 21:11:00 +04:00
|
|
|
|
2015-03-27 13:11:44 +03:00
|
|
|
// Out-of-line destructor; keeps generation of member destructors in this
// translation unit.
CleanupPollsJob::~CleanupPollsJob()
{}
|
|
|
|
|
2014-07-28 14:12:52 +04:00
|
|
|
// Process the next pending poll entry, or finish (and self-delete) when the
// queue is empty. slotPollFinished() calls back into start() to drain the
// queue one entry at a time.
void CleanupPollsJob::start()
{
    if (_pollInfos.empty()) {
        emit finished();
        deleteLater();
        return;
    }

    // Take the front entry off the queue.
    auto info = _pollInfos.first();
    _pollInfos.pop_front();
    SyncFileItem item;
    item._file = info._file;
    item._modtime = info._modtime;
    // The PollJob is parented to this object; it notifies us when done.
    PollJob *job = new PollJob(_account, info._url, item, _journal, _localPath, this);
    connect(job, SIGNAL(finishedSignal()), SLOT(slotPollFinished()));
    job->start();
}
|
|
|
|
|
|
|
|
// Handle one finished PollJob: abort the whole cleanup on a fatal error,
// log ordinary errors, record successes in the journal, then continue with
// the next queue entry.
void CleanupPollsJob::slotPollFinished()
{
    PollJob *job = qobject_cast<PollJob *>(sender());
    Q_ASSERT(job);
    if (job->_item._status == SyncFileItem::FatalError) {
        emit aborted(job->_item._errorString);
        return;
    } else if (job->_item._status != SyncFileItem::Success) {
        qDebug() << "There was an error with file " << job->_item._file << job->_item._errorString;
    } else {
        _journal->setFileRecord(SyncJournalFileRecord(job->_item, _localPath + job->_item._file));
    }
    // Continue with the next entry, or finish
    start();
}
|
|
|
|
|
2013-05-04 18:12:51 +04:00
|
|
|
}
|