2013-05-03 21:11:00 +04:00
|
|
|
/*
|
|
|
|
* Copyright (C) by Olivier Goffart <ogoffart@owncloud.com>
|
|
|
|
* Copyright (C) by Klaas Freitag <freitag@owncloud.com>
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or modify
|
|
|
|
* it under the terms of the GNU General Public License as published by
|
|
|
|
* the Free Software Foundation; either version 2 of the License, or
|
|
|
|
* (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful, but
|
|
|
|
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
|
|
|
|
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
|
|
|
* for more details.
|
|
|
|
*/
|
|
|
|
|
2014-02-18 14:52:38 +04:00
|
|
|
#include "owncloudpropagator.h"
|
2013-10-16 13:59:54 +04:00
|
|
|
#include "syncjournaldb.h"
|
2013-10-28 13:47:10 +04:00
|
|
|
#include "syncjournalfilerecord.h"
|
2014-02-18 14:52:38 +04:00
|
|
|
#include "propagator_qnam.h"
|
|
|
|
#include "propagatorjobs.h"
|
|
|
|
#include "propagator_legacy.h"
|
2014-07-11 02:31:24 +04:00
|
|
|
#include "mirallconfigfile.h"
|
|
|
|
#include "utility.h"
|
2013-05-03 21:11:00 +04:00
|
|
|
|
2014-05-23 20:54:35 +04:00
|
|
|
#ifdef Q_OS_WIN
|
|
|
|
#include <windef.h>
|
|
|
|
#include <winbase.h>
|
|
|
|
#endif
|
|
|
|
|
2014-02-18 14:52:38 +04:00
|
|
|
#include <QStack>
|
2014-05-26 19:36:52 +04:00
|
|
|
#include <QFileInfo>
|
2014-07-07 15:00:38 +04:00
|
|
|
#include <QDir>
|
2013-05-03 21:11:00 +04:00
|
|
|
|
2014-02-18 14:52:38 +04:00
|
|
|
namespace Mirall {
|
2013-12-10 20:19:25 +04:00
|
|
|
|
2014-02-12 14:07:34 +04:00
|
|
|
/* The maximum number of active job in parallel */
|
2014-03-28 14:20:07 +04:00
|
|
|
static int maximumActiveJob() {
|
|
|
|
static int max = qgetenv("OWNCLOUD_MAX_PARALLEL").toUInt();
|
|
|
|
if (!max) {
|
|
|
|
max = 3; //default
|
|
|
|
}
|
|
|
|
return max;
|
|
|
|
}
|
2014-02-12 14:07:34 +04:00
|
|
|
|
2013-11-20 16:44:01 +04:00
|
|
|
/** Finalizes this propagation job: records the status and error string on the
 *  item, updates or wipes the journal blacklist entry accordingly, and emits
 *  the completed()/finished() signals.
 *
 *  @param status      outcome of the job
 *  @param errorString human-readable error description (empty on success)
 */
void PropagateItemJob::done(SyncFileItem::Status status, const QString &errorString)
{
    // A restoration job rewrites the final status: a successful (or conflicting)
    // restore is reported as Restoration; a failed one appends the error text.
    if (_item._isRestoration) {
        if( status == SyncFileItem::Success || status == SyncFileItem::Conflict) {
            status = SyncFileItem::Restoration;
        } else {
            _item._errorString += tr("; Restoration Failed: ") + errorString;
        }
    } else {
        _item._errorString = errorString;
    }
    _item._status = status;

    // Blacklisting: decide how many retries this item gets before it is
    // considered permanently failing.
    int retries = 0;

    if( _item._httpErrorCode == 403 ||_item._httpErrorCode == 413 || _item._httpErrorCode == 415 ) {
        // These HTTP codes will not get better by retrying.
        qDebug() << "Fatal Error condition" << _item._httpErrorCode << ", forbid retry!";
        retries = -1;
    } else {
        // Retry count is tunable through OWNCLOUD_BLACKLIST_COUNT; defaults to 3.
        static QAtomicInt defaultRetriesCount(qgetenv("OWNCLOUD_BLACKLIST_COUNT").toInt());
        if (defaultRetriesCount.fetchAndAddAcquire(0) <= 0) {
            defaultRetriesCount.fetchAndStoreRelease(3);
        }
        retries = defaultRetriesCount.fetchAndAddAcquire(0);
    }
    SyncJournalBlacklistRecord record(_item, retries); // fix: removed stray second semicolon

    switch( status ) {
    case SyncFileItem::SoftError:
    case SyncFileItem::FatalError:
        // do not blacklist in case of soft error or fatal error.
        break;
    case SyncFileItem::NormalError:
#ifdef OWNCLOUD_5XX_NO_BLACKLIST
        if (_item._httpErrorCode / 100 == 5) {
            // In this configuration, never blacklist error 5xx
            qDebug() << "Do not blacklist error " << _item._httpErrorCode;
            break;
        }
#endif
        _propagator->_journal->updateBlacklistEntry( record );
        break;
    case SyncFileItem::Success:
    case SyncFileItem::Restoration:
        if( _item._blacklistedInDb ) {
            // wipe blacklist entry.
            _propagator->_journal->wipeBlacklistEntry(_item._file);
        }
        break;
    case SyncFileItem::Conflict:
    case SyncFileItem::FileIgnored:
    case SyncFileItem::NoStatus:
        // nothing
        break;
    }

    emit completed(_item);
    emit finished(status);
}
|
|
|
|
|
2014-02-12 16:44:55 +04:00
|
|
|
/**
 * For delete or remove, check that we are not removing from a shared directory.
 * If we are, try to restore the file.
 *
 * @param httpStatusCode HTTP status returned by the failed operation
 * @param msg            message attached to the spawned restore job, later
 *                       reported through slotRestoreJobCompleted()
 *
 * Return true if the problem is handled.
 */
bool PropagateItemJob::checkForProblemsWithShared(int httpStatusCode, const QString& msg)
{
    PropagateItemJob *newJob = NULL;

    // A 403 inside a shared directory is treated as the share being read-only.
    if( httpStatusCode == 403 && _propagator->isInSharedDirectory(_item._file )) {
        if( !_item._isDirectory ) {
            SyncFileItem downloadItem(_item);
            if (downloadItem._instruction == CSYNC_INSTRUCTION_NEW) {
                // don't try to recover pushing new files
                return false;
            } else if (downloadItem._instruction == CSYNC_INSTRUCTION_SYNC) {
                // we modified the file locally, just create a conflict then
                downloadItem._instruction = CSYNC_INSTRUCTION_CONFLICT;

                // HACK to avoid continuation: See task #1448: We do not know the _modtime from the
                // server, at this point, so just set the current one. (rather than the one locally)
                downloadItem._modtime = Utility::qDateTimeToTime_t(QDateTime::currentDateTime());
            } else {
                // the file was removed or renamed, just recover the old one
                downloadItem._instruction = CSYNC_INSTRUCTION_SYNC;
            }
            // Recover by downloading the server version back.
            downloadItem._direction = SyncFileItem::Down;
            newJob = new PropagateDownloadFileLegacy(_propagator, downloadItem);
        } else {
            // Directories are harder to recover.
            // But just re-create the directory, next sync will be able to recover the files
            SyncFileItem mkdirItem(_item);
            mkdirItem._instruction = CSYNC_INSTRUCTION_SYNC;
            mkdirItem._direction = SyncFileItem::Down;
            newJob = new PropagateLocalMkdir(_propagator, mkdirItem);
            // Also remove the inodes and fileid from the db so no further renames are tried for
            // this item.
            _propagator->_journal->avoidRenamesOnNextSync(_item._file);
        }
        if( newJob ) {
            // Hand ownership of the restore job to _restoreJob (QScopedPointer)
            // and start it asynchronously.
            newJob->setRestoreJobMsg(msg);
            _restoreJob.reset(newJob);
            connect(_restoreJob.data(), SIGNAL(completed(SyncFileItem)),
                    this, SLOT(slotRestoreJobCompleted(SyncFileItem)));
            QMetaObject::invokeMethod(newJob, "start");
        }
        return true;
    }
    return false;
}
|
|
|
|
|
|
|
|
void PropagateItemJob::slotRestoreJobCompleted(const SyncFileItem& item )
|
|
|
|
{
|
2014-02-19 20:21:01 +04:00
|
|
|
QString msg;
|
|
|
|
if(_restoreJob) {
|
|
|
|
msg = _restoreJob->restoreJobMsg();
|
|
|
|
_restoreJob->setRestoreJobMsg();
|
|
|
|
}
|
|
|
|
|
2014-06-23 15:56:17 +04:00
|
|
|
if( item._status == SyncFileItem::Success || item._status == SyncFileItem::Conflict
|
|
|
|
|| item._status == SyncFileItem::Restoration) {
|
2014-02-19 20:21:01 +04:00
|
|
|
done( SyncFileItem::SoftError, msg);
|
2014-02-04 18:01:10 +04:00
|
|
|
} else {
|
2014-02-19 20:21:01 +04:00
|
|
|
done( item._status, tr("A file or directory was removed from a read only share, but restoring failed: %1").arg(item._errorString) );
|
2014-02-04 18:01:10 +04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-05-23 20:54:35 +04:00
|
|
|
// ================================================================================
|
|
|
|
|
2013-10-28 13:47:10 +04:00
|
|
|
/** Factory: creates the propagation job matching the item's csync instruction
 *  and direction, or 0 when nothing needs to be done for this item. */
PropagateItemJob* OwncloudPropagator::createJob(const SyncFileItem& item) {
    switch(item._instruction) {
    case CSYNC_INSTRUCTION_REMOVE:
        if (item._direction == SyncFileItem::Down) return new PropagateLocalRemove(this, item);
        else return new PropagateRemoteRemove(this, item);
    case CSYNC_INSTRUCTION_NEW:
        if (item._isDirectory) {
            if (item._direction == SyncFileItem::Down) return new PropagateLocalMkdir(this, item);
            else return new PropagateRemoteMkdir(this, item);
        } // fall through: new files are handled like SYNC/CONFLICT below
    case CSYNC_INSTRUCTION_SYNC:
    case CSYNC_INSTRUCTION_CONFLICT:
        if (item._isDirectory) {
            // Should we set the mtime?
            return 0;
        }
        // Legacy (libneon) jobs are used when QNAM cannot provide a needed
        // feature; see useLegacyJobs().
        if (useLegacyJobs()) {
            if (item._direction != SyncFileItem::Up) {
                return new PropagateDownloadFileLegacy(this, item);
            } else {
                return new PropagateUploadFileLegacy(this, item);
            }
        } else {
            if (item._direction != SyncFileItem::Up) {
                return new PropagateDownloadFileQNAM(this, item);
            } else {
                return new PropagateUploadFileQNAM(this, item);
            }
        }
    case CSYNC_INSTRUCTION_RENAME:
        if (item._direction == SyncFileItem::Up) {
            return new PropagateRemoteRename(this, item);
        } else {
            return new PropagateLocalRename(this, item);
        }
    case CSYNC_INSTRUCTION_IGNORE:
    case CSYNC_INSTRUCTION_ERROR:
        return new PropagateIgnoreJob(this, item);
    default:
        return 0;
    }
    return 0;
}
|
2013-08-14 21:59:16 +04:00
|
|
|
|
2014-06-27 15:34:15 +04:00
|
|
|
/** Builds the whole propagation job tree from the sorted item list and starts
 *  the root job asynchronously. */
void OwncloudPropagator::start(const SyncFileItemVector& items)
{
    /* This builds all the jobs needed for the propagation.
     * Each directory is a PropagateDirectory job, which contains the files in it.
     * In order to do that we loop over the items. (which are sorted by destination)
     * When we enter a directory, we can create the directory job and push it on the stack. */

    _rootJob.reset(new PropagateDirectory(this));
    QStack<QPair<QString /* directory name */, PropagateDirectory* /* job */> > directories;
    directories.push(qMakePair(QString(), _rootJob.data()));
    QVector<PropagatorJob*> directoriesToRemove;
    QString removedDirectory;
    foreach(const SyncFileItem &item, items) {

        if (!removedDirectory.isEmpty() && item._file.startsWith(removedDirectory)) {
            // this is an item in a directory which is going to be removed.
            if (item._instruction == CSYNC_INSTRUCTION_REMOVE) {
                //already taken care of. (by the removal of the parent directory)
                continue;
            } else if (item._instruction == CSYNC_INSTRUCTION_NEW && item._isDirectory) {
                // create a new directory within a deleted directory? That can happen if the directory
                // etag were not fetched properly on the previous sync because the sync was aborted
                // while uploading this directory (which is now removed). We can ignore it.
                continue;
            } else if (item._instruction == CSYNC_INSTRUCTION_IGNORE) {
                continue;
            }

            qWarning() << "WARNING: Job within a removed directory? This should not happen!"
                       << item._file << item._instruction;
        }

        // Pop the stack until its top is an ancestor of this item
        // (items are sorted by destination, so this finds the parent job).
        while (!item.destination().startsWith(directories.top().first)) {
            directories.pop();
        }

        if (item._isDirectory) {
            PropagateDirectory *dir = new PropagateDirectory(this, item);
            dir->_firstJob.reset(createJob(item));
            if (item._instruction == CSYNC_INSTRUCTION_REMOVE) {
                //We do the removal of directories at the end, because there might be moves from
                // this directories that will happen later.
                directoriesToRemove.append(dir);
                removedDirectory = item._file + "/";

                // We should not update the etag of parent directories of the removed directory
                // since it would be done before the actual remove (issue #1845)
                // NOTE: Currently this means that we don't update those etag at all in this sync,
                // but it should not be a problem, they will be updated in the next sync.
                for (int i = 0; i < directories.size(); ++i) {
                    directories[i].second->_item._should_update_etag = false;
                }
            } else {
                PropagateDirectory* currentDirJob = directories.top().second;
                currentDirJob->append(dir);
            }
            directories.push(qMakePair(item.destination() + "/" , dir));
        } else if (PropagateItemJob* current = createJob(item)) {
            directories.top().second->append(current);
        }
    }

    // Deferred directory removals go last, attached directly to the root job.
    foreach(PropagatorJob* it, directoriesToRemove) {
        _rootJob->append(it);
    }

    connect(_rootJob.data(), SIGNAL(completed(SyncFileItem)), this, SIGNAL(completed(SyncFileItem)));
    connect(_rootJob.data(), SIGNAL(progress(SyncFileItem,quint64)), this, SIGNAL(progress(SyncFileItem,quint64)));
    connect(_rootJob.data(), SIGNAL(finished(SyncFileItem::Status)), this, SLOT(emitFinished()));

    qDebug() << (useLegacyJobs() ? "Using legacy libneon/HTTP sequential code path" : "Using QNAM/HTTP parallel code path");

    QMetaObject::invokeMethod(_rootJob.data(), "start", Qt::QueuedConnection);
}
|
2013-05-03 21:11:00 +04:00
|
|
|
|
2014-02-04 18:01:10 +04:00
|
|
|
bool OwncloudPropagator::isInSharedDirectory(const QString& file)
|
|
|
|
{
|
|
|
|
bool re = false;
|
|
|
|
if( _remoteDir.contains("remote.php/webdav/Shared") ) {
|
|
|
|
// The Shared directory is synced as its own sync connection
|
|
|
|
re = true;
|
|
|
|
} else {
|
2014-02-19 18:23:36 +04:00
|
|
|
if( file.startsWith("Shared/") || file == "Shared" ) {
|
2014-02-04 18:01:10 +04:00
|
|
|
// The whole ownCloud is synced and Shared is always a top dir
|
|
|
|
re = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return re;
|
|
|
|
}
|
|
|
|
|
2014-02-18 15:24:35 +04:00
|
|
|
/**
|
|
|
|
* Return true if we should use the legacy jobs.
|
|
|
|
* Some feature are not supported by QNAM and therefore we still use the legacy jobs
|
|
|
|
* for this case.
|
|
|
|
*/
|
|
|
|
bool OwncloudPropagator::useLegacyJobs()
|
|
|
|
{
|
|
|
|
if (_downloadLimit.fetchAndAddAcquire(0) != 0 || _uploadLimit.fetchAndAddAcquire(0) != 0) {
|
|
|
|
// QNAM does not support bandwith limiting
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Allow an environement variable for debugging
|
|
|
|
QByteArray env = qgetenv("OWNCLOUD_USE_LEGACY_JOBS");
|
|
|
|
return env=="true" || env =="1";
|
|
|
|
}
|
|
|
|
|
2014-05-23 20:54:35 +04:00
|
|
|
int OwncloudPropagator::httpTimeout()
|
|
|
|
{
|
|
|
|
static int timeout;
|
|
|
|
if (!timeout) {
|
|
|
|
timeout = qgetenv("OWNCLOUD_TIMEOUT").toUInt();
|
|
|
|
if (timeout == 0) {
|
2014-05-28 18:28:22 +04:00
|
|
|
MirallConfigFile cfg;
|
|
|
|
timeout = cfg.timeout();
|
2014-05-23 20:54:35 +04:00
|
|
|
}
|
2014-05-28 18:28:22 +04:00
|
|
|
|
2014-05-23 20:54:35 +04:00
|
|
|
}
|
|
|
|
return timeout;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool OwncloudPropagator::localFileNameClash( const QString& relFile )
|
|
|
|
{
|
|
|
|
bool re = false;
|
|
|
|
const QString file( _localDir + relFile );
|
2014-05-26 19:36:52 +04:00
|
|
|
|
2014-05-26 19:52:24 +04:00
|
|
|
if( !file.isEmpty() && Utility::fsCasePreserving() ) {
|
|
|
|
#ifdef Q_OS_MAC
|
|
|
|
QFileInfo fileInfo(file);
|
2014-05-30 17:46:51 +04:00
|
|
|
if (!fileInfo.exists()) {
|
2014-05-26 19:52:24 +04:00
|
|
|
re = false;
|
2014-05-30 17:46:51 +04:00
|
|
|
} else {
|
|
|
|
re = ( ! fileInfo.canonicalFilePath().endsWith(relFile, Qt::CaseSensitive) );
|
|
|
|
}
|
2014-05-26 19:52:24 +04:00
|
|
|
#elif defined(Q_OS_WIN)
|
|
|
|
const QString file( _localDir + relFile );
|
|
|
|
qDebug() << "CaseClashCheck for " << file;
|
|
|
|
WIN32_FIND_DATA FindFileData;
|
|
|
|
HANDLE hFind;
|
|
|
|
|
|
|
|
hFind = FindFirstFileW( (wchar_t*)file.utf16(), &FindFileData);
|
|
|
|
if (hFind == INVALID_HANDLE_VALUE) {
|
2014-06-02 21:38:04 +04:00
|
|
|
//qDebug() << "FindFirstFile failed " << GetLastError();
|
2014-05-26 19:52:24 +04:00
|
|
|
// returns false.
|
|
|
|
} else {
|
|
|
|
QString realFileName = QString::fromWCharArray( FindFileData.cFileName );
|
|
|
|
qDebug() << Q_FUNC_INFO << "Real file name is " << realFileName;
|
|
|
|
FindClose(hFind);
|
2014-05-23 20:54:35 +04:00
|
|
|
|
2014-05-26 19:52:24 +04:00
|
|
|
if( ! file.endsWith(realFileName, Qt::CaseSensitive) ) {
|
|
|
|
re = true;
|
|
|
|
}
|
2014-05-23 20:54:35 +04:00
|
|
|
}
|
2014-07-07 15:00:38 +04:00
|
|
|
#else
|
|
|
|
// On Linux, the file system is case sensitive, but this code is usefull for testing.
|
|
|
|
// Just check that there is no other file with the same name and different casing.
|
|
|
|
QFileInfo fileInfo(file);
|
|
|
|
const QString fn = fileInfo.fileName();
|
|
|
|
QStringList list = fileInfo.dir().entryList(QStringList() << fn);
|
|
|
|
if (list.count() > 1 || (list.count() == 1 && list[0] != fn)) {
|
|
|
|
re = true;
|
|
|
|
}
|
2014-05-23 20:54:35 +04:00
|
|
|
#endif
|
2014-05-26 19:52:24 +04:00
|
|
|
}
|
2014-05-23 20:54:35 +04:00
|
|
|
return re;
|
|
|
|
}
|
|
|
|
|
|
|
|
// ================================================================================
|
2014-02-18 15:24:35 +04:00
|
|
|
|
2013-11-15 20:42:27 +04:00
|
|
|
void PropagateDirectory::start()
|
|
|
|
{
|
|
|
|
_current = -1;
|
2013-11-28 13:00:12 +04:00
|
|
|
_hasError = SyncFileItem::NoStatus;
|
2013-11-15 20:42:27 +04:00
|
|
|
if (!_firstJob) {
|
2014-02-12 14:07:34 +04:00
|
|
|
slotSubJobReady();
|
2013-11-15 20:42:27 +04:00
|
|
|
} else {
|
|
|
|
startJob(_firstJob.data());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-02-06 15:11:45 +04:00
|
|
|
/** Handles completion of one sub job: aborts on fatal errors (or any failure
 *  of the directory's own first job), otherwise records the error and tries
 *  to schedule the next job. */
void PropagateDirectory::slotSubJobFinished(SyncFileItem::Status status)
{
    // While _firstJob runs, _current is still -1; any non-success outcome of
    // that job means the directory itself could not be propagated.
    const bool firstJobFailed = _current == -1
            && status != SyncFileItem::Success
            && status != SyncFileItem::Restoration;

    if (status == SyncFileItem::FatalError || firstJobFailed) {
        abort();
        emit finished(status);
        return;
    }
    if (status == SyncFileItem::NormalError || status == SyncFileItem::SoftError) {
        // Remember the error but keep propagating the remaining items.
        _hasError = status;
    }
    _runningNow--;
    slotSubJobReady();
}
|
2013-10-28 13:47:10 +04:00
|
|
|
|
2014-02-12 14:07:34 +04:00
|
|
|
/** Scheduler step: starts the next sub job when allowed; once all sub jobs
 *  have run, writes the directory's journal record (when appropriate) and
 *  emits finished(). */
void PropagateDirectory::slotSubJobReady()
{
    if (_runningNow && _current == -1)
        return; // Ignore the case when the _firstJob is ready and not yet finished
    if (_runningNow && _current >= 0 && _current < _subJobs.count()) {
        // there is a job running and the current one is not ready yet, we can't start a new job
        if (!_subJobs[_current]->_readySent || _propagator->_activeJobs >= maximumActiveJob())
            return;
    }

    _current++;
    // Start the next sub job unless the whole propagation was aborted.
    if (_current < _subJobs.size() && !_propagator->_abortRequested.fetchAndAddRelaxed(0)) {
        PropagatorJob *next = _subJobs.at(_current);
        startJob(next);
        return;
    }
    // We finished processing all the jobs
    emitReady();
    if (!_runningNow) {
        if (!_item.isEmpty() && _hasError == SyncFileItem::NoStatus) {
            if( !_item._renameTarget.isEmpty() ) {
                _item._file = _item._renameTarget;
            }

            if (_item._should_update_etag && _item._instruction != CSYNC_INSTRUCTION_REMOVE) {
                if (PropagateRemoteMkdir* mkdir = qobject_cast<PropagateRemoteMkdir*>(_firstJob.data())) {
                    // special case from MKDIR, get the fileId from the job there
                    if (_item._fileId.isEmpty() && !mkdir->_item._fileId.isEmpty()) {
                        _item._fileId = mkdir->_item._fileId;
                    }
                }
                // Persist the directory's record (etag etc.) in the sync journal.
                SyncJournalFileRecord record(_item, _propagator->_localDir + _item._file);
                _propagator->_journal->setFileRecord(record);
            }
        }
        emit finished(_hasError == SyncFileItem::NoStatus ? SyncFileItem::Success : _hasError);
    }
}
|
2013-05-03 21:11:00 +04:00
|
|
|
|
2013-05-04 18:12:51 +04:00
|
|
|
}
|