Merge pull request #4962 from nextcloud/bugfix/files-not-unlocking
This commit is contained in: commit b9f6d914d9

7 changed files with 364 additions and 21 deletions
@@ -1351,7 +1351,7 @@ bool SyncJournalDb::updateFileRecordChecksum(const QString &filename,
 }
 
 bool SyncJournalDb::updateLocalMetadata(const QString &filename,
-                                        qint64 modtime, qint64 size, quint64 inode)
+                                        qint64 modtime, qint64 size, quint64 inode, const SyncJournalFileLockInfo &lockInfo)
 
 {
     QMutexLocker locker(&_mutex);
@@ -1365,7 +1365,9 @@ bool SyncJournalDb::updateLocalMetadata(const QString &filename,
     }
 
     const auto query = _queryManager.get(PreparedSqlQueryManager::SetFileRecordLocalMetadataQuery, QByteArrayLiteral("UPDATE metadata"
-                                         " SET inode=?2, modtime=?3, filesize=?4"
+                                         " SET inode=?2, modtime=?3, filesize=?4, lock=?5, lockType=?6,"
+                                         " lockOwnerDisplayName=?7, lockOwnerId=?8, lockOwnerEditor = ?9,"
+                                         " lockTime=?10, lockTimeout=?11"
                                          " WHERE phash == ?1;"),
                                          _db);
     if (!query) {
@@ -1376,6 +1378,13 @@ bool SyncJournalDb::updateLocalMetadata(const QString &filename,
     query->bindValue(2, inode);
     query->bindValue(3, modtime);
     query->bindValue(4, size);
+    query->bindValue(5, lockInfo._locked ? 1 : 0);
+    query->bindValue(6, lockInfo._lockOwnerType);
+    query->bindValue(7, lockInfo._lockOwnerDisplayName);
+    query->bindValue(8, lockInfo._lockOwnerId);
+    query->bindValue(9, lockInfo._lockEditorApp);
+    query->bindValue(10, lockInfo._lockTime);
+    query->bindValue(11, lockInfo._lockTimeout);
     return query->exec();
 }
 
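For readers following the schema change above, here is a minimal, hypothetical caller of the extended updateLocalMetadata(). The lock fields mirror the ones bound in the diff; the helper name, the concrete values and the journal/file parameters are illustrative and not part of this PR.

// Sketch only: assumes the OCC headers declaring SyncJournalDb and
// SyncJournalFileLockInfo are already included by the caller.
bool storeLockState(OCC::SyncJournalDb &journal, const QString &file,
                    qint64 modtime, qint64 size, quint64 inode)
{
    OCC::SyncJournalFileLockInfo lockInfo;
    lockInfo._locked = true;
    lockInfo._lockOwnerType = 0;                              // owner type as reported by the server
    lockInfo._lockOwnerId = QStringLiteral("user1");          // sample values, matching the test data below
    lockInfo._lockOwnerDisplayName = QStringLiteral("user1");
    lockInfo._lockEditorApp = QString();
    lockInfo._lockTime = 1648046707;                          // seconds since epoch
    lockInfo._lockTimeout = 1800;                             // hypothetical 30-minute lock timeout
    return journal.updateLocalMetadata(file, modtime, size, inode, lockInfo);
}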
@@ -78,7 +78,7 @@ public:
                                         const QByteArray &contentChecksum,
                                         const QByteArray &contentChecksumType);
     [[nodiscard]] bool updateLocalMetadata(const QString &filename,
-                                           qint64 modtime, qint64 size, quint64 inode);
+                                           qint64 modtime, qint64 size, quint64 inode, const SyncJournalFileLockInfo &lockInfo);
 
     /// Return value for hasHydratedOrDehydratedFiles()
     struct HasHydratedDehydrated
@@ -394,6 +394,29 @@ void ProcessDirectoryJob::processFile(PathTuple path,
     if (item->_type == ItemTypeVirtualFileDehydration)
         item->_type = ItemTypeFile;
 
+    // We want to check the lock state of this file after the lock time has expired
+    if(serverEntry.locked == SyncFileItem::LockStatus::LockedItem) {
+        const auto lockExpirationTime = serverEntry.lockTime + serverEntry.lockTimeout;
+        const auto timeRemaining = QDateTime::currentDateTime().secsTo(QDateTime::fromSecsSinceEpoch(lockExpirationTime));
+        // Add on a second as a precaution, sometimes we catch the server before it has had a chance to update
+        const auto lockExpirationTimeout = qMax(5LL, timeRemaining + 1);
+
+        qCInfo(lcDisco) << "File:" << path._original << "is locked."
+                        << "Lock expires in:" << lockExpirationTimeout << "seconds."
+                        << "A sync run will be scheduled for around that time.";
+
+        _discoveryData->_anotherSyncNeeded = true;
+        _discoveryData->_filesNeedingScheduledSync.insert(path._original, lockExpirationTimeout);
+
+    } else if (serverEntry.locked == SyncFileItem::LockStatus::UnlockedItem && dbEntry._lockstate._locked) {
+        // We have received data that this file has been unlocked remotely, so let's notify the sync engine
+        // that we no longer need a scheduled sync run for this file
+        qCInfo(lcDisco) << "File:" << path._original << "is unlocked and a scheduled sync is no longer needed."
+                        << "Will remove scheduled sync if there is one.";
+
+        _discoveryData->_filesUnscheduleSync.append(path._original);
+    }
+
     // VFS suffixed files on the server are ignored
     if (isVfsWithSuffix()) {
         if (hasVirtualFileSuffix(serverEntry.name)
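As a worked example of the scheduling arithmetic introduced above, the following self-contained sketch shows how the recheck delay is derived; the helper name and the sample numbers are illustrative, not from the PR.

#include <QDateTime>
#include <QtGlobal>

// Mirrors the calculation in ProcessDirectoryJob::processFile(): schedule the
// recheck one second after the lock is due to expire, but never sooner than 5 s.
qint64 secondsUntilLockRecheck(qint64 lockTime, qint64 lockTimeout)
{
    const auto lockExpirationTime = lockTime + lockTimeout;
    const auto timeRemaining = QDateTime::currentDateTime().secsTo(QDateTime::fromSecsSinceEpoch(lockExpirationTime));
    return qMax(5LL, timeRemaining + 1);
}

// A file locked right now for 600 s yields roughly 601, while a lock that has
// already expired still yields the 5-second floor.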
@@ -498,6 +521,15 @@ void ProcessDirectoryJob::processFileAnalyzeRemoteInfo(
         }
     }
 
+    // We need to make sure that we update the info in the database if the lockstate has changed
+    const auto checkFileLockState = [&item, &dbEntry, &serverEntry] {
+        const bool isServerEntryLocked = serverEntry.locked == SyncFileItem::LockStatus::LockedItem;
+
+        if(isServerEntryLocked != dbEntry._lockstate._locked) {
+            item->_instruction = CSYNC_INSTRUCTION_UPDATE_METADATA;
+        }
+    };
+
     // The file is known in the db already
     if (dbEntry.isValid()) {
         const bool isDbEntryAnE2EePlaceholder = dbEntry.isVirtualFile() && !dbEntry.e2eMangledName().isEmpty();
@@ -579,10 +611,12 @@ void ProcessDirectoryJob::processFileAnalyzeRemoteInfo(
             return ParentNotChanged;
         }();
 
+        checkFileLockState();
         processFileAnalyzeLocalInfo(item, path, localEntry, serverEntry, dbEntry, serverQueryMode);
         return;
     }
 
+    checkFileLockState();
     processFileAnalyzeLocalInfo(item, path, localEntry, serverEntry, dbEntry, _queryServer);
     return;
 }
@@ -815,7 +849,7 @@ void ProcessDirectoryJob::processFileAnalyzeLocalInfo(
 
     bool serverModified = item->_instruction == CSYNC_INSTRUCTION_NEW || item->_instruction == CSYNC_INSTRUCTION_SYNC
         || item->_instruction == CSYNC_INSTRUCTION_RENAME || item->_instruction == CSYNC_INSTRUCTION_TYPE_CHANGE;
 
     qCDebug(lcDisco) << "File" << item->_file << "- servermodified:" << serverModified
                      << "noServerEntry:" << noServerEntry;
 
@@ -1029,7 +1063,7 @@ void ProcessDirectoryJob::processFileAnalyzeLocalInfo(
         item->_size = localEntry.size;
         item->_modtime = localEntry.modtime;
         _childModified = true;
 
         qCDebug(lcDisco) << "Local file was changed: File" << item->_file
                          << "item->_instruction:" << item->_instruction
                          << "noServerEntry:" << noServerEntry
@@ -1316,7 +1350,7 @@ void ProcessDirectoryJob::processFileAnalyzeLocalInfo(
         chopVirtualFileSuffix(serverOriginalPath);
         auto job = new RequestEtagJob(_discoveryData->_account, serverOriginalPath, this);
         connect(job, &RequestEtagJob::finishedWithResult, this, [=](const HttpResult<QByteArray> &etag) mutable {
 
             if (!etag || (etag.get() != base._etag && !item->isDirectory()) || _discoveryData->isRenamed(originalPath)
                 || (isAnyParentBeingRestored(originalPath) && !isRename(originalPath))) {
@@ -1382,7 +1416,7 @@ void ProcessDirectoryJob::processFileConflict(const SyncFileItemPtr &item, Proce
                          << "localEntry.modtime:" << localEntry.modtime;
         return;
     }
 
     if (!serverEntry.checksumHeader.isEmpty()) {
         qCDebug(lcDisco) << "CSYNC_INSTRUCTION_CONFLICT: File" << item->_file << "if (!serverEntry.checksumHeader.isEmpty())";
         qCDebug(lcDisco) << "CSYNC_INSTRUCTION_CONFLICT: serverEntry.size:" << serverEntry.size
@@ -1425,7 +1459,7 @@ void ProcessDirectoryJob::processFileConflict(const SyncFileItemPtr &item, Proce
         }
         return;
     }
 
     if (!up._valid || up._contentChecksum != serverEntry.checksumHeader) {
         qCDebug(lcDisco) << "CSYNC_INSTRUCTION_SYNC: File" << item->_file << "if (!up._valid && up._contentChecksum != serverEntry.checksumHeader)";
         qCDebug(lcDisco) << "CSYNC_INSTRUCTION_SYNC: up._valid:" << up._valid
@@ -1636,7 +1670,7 @@ bool ProcessDirectoryJob::isRename(const QString &originalPath) const
 
     /* TODO: This was needed at some point to cover an edge case which I am no longer to reproduce and it might no longer be the case.
      * Still, leaving this here just in case the edge case is caught at some point in future.
      *
     OCC::SyncJournalFileRecord base;
     // are we allowed to rename?
     if (!_discoveryData || !_discoveryData->_statedb || !_discoveryData->_statedb->getFileRecord(originalPath, &base)) {
@@ -283,6 +283,8 @@ public:
     // output
     QByteArray _dataFingerprint;
     bool _anotherSyncNeeded = false;
+    QHash<QString, long long> _filesNeedingScheduledSync;
+    QVector<QString> _filesUnscheduleSync;
 
 signals:
     void fatalError(const QString &errorString);
@@ -391,7 +391,17 @@ void OCC::SyncEngine::slotItemDiscovered(const OCC::SyncFileItemPtr &item)
             emit itemCompleted(item);
         } else {
             // Update only outdated data from the disk.
-            if (!_journal->updateLocalMetadata(item->_file, item->_modtime, item->_size, item->_inode)) {
+            SyncJournalFileLockInfo lockInfo;
+            lockInfo._locked = item->_locked == SyncFileItem::LockStatus::LockedItem;
+            lockInfo._lockTime = item->_lockTime;
+            lockInfo._lockTimeout = item->_lockTimeout;
+            lockInfo._lockOwnerId = item->_lockOwnerId;
+            lockInfo._lockOwnerType = static_cast<qint64>(item->_lockOwnerType);
+            lockInfo._lockOwnerDisplayName = item->_lockOwnerDisplayName;
+            lockInfo._lockEditorApp = item->_lockOwnerDisplayName;
+
+            if (!_journal->updateLocalMetadata(item->_file, item->_modtime, item->_size, item->_inode, lockInfo)) {
                 qCWarning(lcEngine) << "Could not update local metadata for file" << item->_file;
             }
         }
@@ -453,7 +463,6 @@ void SyncEngine::startSync()
     }
 
     if (s_anySyncRunning || _syncRunning) {
-        ASSERT(false)
         return;
     }
 
@@ -689,10 +698,16 @@ void SyncEngine::slotDiscoveryFinished()
         restoreOldFiles(_syncItems);
     }
 
-    if (_discoveryPhase->_anotherSyncNeeded && _anotherSyncNeeded == NoFollowUpSync) {
+    if (_discoveryPhase->_anotherSyncNeeded && !_discoveryPhase->_filesNeedingScheduledSync.empty()) {
+        slotScheduleFilesDelayedSync();
+    } else if (_discoveryPhase->_anotherSyncNeeded && _anotherSyncNeeded == NoFollowUpSync) {
         _anotherSyncNeeded = ImmediateFollowUp;
     }
 
+    if (!_discoveryPhase->_filesUnscheduleSync.empty()) {
+        slotUnscheduleFilesDelayedSync();
+    }
+
     Q_ASSERT(std::is_sorted(_syncItems.begin(), _syncItems.end()));
 
     qCInfo(lcEngine) << "#### Reconcile (aboutToPropagate) #################################################### " << _stopWatch.addLapTime(QStringLiteral("Reconcile (aboutToPropagate)")) << "ms";
@@ -1116,4 +1131,200 @@ void SyncEngine::slotInsufficientRemoteStorage()
     emit syncError(msg, ErrorCategory::InsufficientRemoteStorage);
 }
 
+void SyncEngine::slotScheduleFilesDelayedSync()
+{
+    if (!_discoveryPhase || _discoveryPhase->_filesNeedingScheduledSync.empty()) {
+        return;
+    }
+
+    // The latest sync of the interval bucket is the one that goes through and is used in the timer.
+    // By running the sync run as late as possible in the selected interval, we try to strike a
+    // balance between updating the needed file in a timely manner while also syncing late enough
+    // to cover all the files in the interval bucket.
+
+    static constexpr qint64 intervalSecs = 60;
+    const auto scheduledSyncBuckets = groupNeededScheduledSyncRuns(intervalSecs);
+
+    qCDebug(lcEngine) << "Active scheduled sync run timers:" << _scheduledSyncTimers.count();
+
+    for (const auto &[scheduledSyncTimerSecs, filesAffected] : scheduledSyncBuckets) {
+
+        const auto currentSecsSinceEpoch = QDateTime::currentSecsSinceEpoch();
+        const auto scheduledSyncTimerTime = QDateTime::fromSecsSinceEpoch(currentSecsSinceEpoch + scheduledSyncTimerSecs);
+        const auto scheduledSyncTimerMsecs = std::chrono::milliseconds(scheduledSyncTimerSecs * 1000);
+
+        const auto addFilesToTimerAndScheduledHash = [this, &files = filesAffected] (const QSharedPointer<ScheduledSyncTimer> &timer) {
+            for (const auto &file : files) {
+                timer->files.insert(file);
+                _filesScheduledForLaterSync.insert(file, timer);
+            }
+        };
+
+        // We want to make sure that this bucket won't schedule a sync near a pre-existing sync run,
+        // as we often get, for example, locked file notifications one by one as the user interacts
+        // through the web.
+
+        const auto nearbyTimer = nearbyScheduledSyncTimer(scheduledSyncTimerSecs, intervalSecs);
+
+        if (nearbyTimer) {
+            addFilesToTimerAndScheduledHash(nearbyTimer);
+            qCInfo(lcEngine) << "Using a nearby scheduled sync run at:" << scheduledSyncTimerTime
+                             << "for files:" << filesAffected
+                             << "this timer is now resoponsible for files:" << nearbyTimer->files;
+            continue;
+        }
+
+        qCInfo(lcEngine) << "Will have a new sync run in" << scheduledSyncTimerSecs
+                         << "seconds, at" << scheduledSyncTimerTime
+                         << "for files:" << filesAffected;
+
+        QSharedPointer<ScheduledSyncTimer> newTimer(new ScheduledSyncTimer);
+        newTimer->setSingleShot(true);
+        newTimer->callOnTimeout(this, [this, newTimer] {
+            qCInfo(lcEngine) << "Rescanning now that delayed sync run is scheduled for:" << newTimer->files;
+
+            for (const auto &file : newTimer->files) {
+                this->_filesScheduledForLaterSync.remove(file);
+            }
+
+            this->startSync();
+            this->slotCleanupScheduledSyncTimers();
+        });
+
+        addFilesToTimerAndScheduledHash(newTimer);
+        newTimer->start(scheduledSyncTimerMsecs);
+        _scheduledSyncTimers.append(newTimer);
+    }
+}
+
+QHash<qint64, SyncEngine::ScheduledSyncBucket> SyncEngine::groupNeededScheduledSyncRuns(const qint64 interval) const
+{
+    if (!_discoveryPhase || _discoveryPhase->_filesNeedingScheduledSync.empty()) {
+        return {};
+    }
+
+    QHash<qint64, ScheduledSyncBucket> intervalSyncBuckets;
+
+    for (auto it = _discoveryPhase->_filesNeedingScheduledSync.cbegin();
+         it != _discoveryPhase->_filesNeedingScheduledSync.cend();
+         ++it) {
+
+        const auto file = it.key();
+        const auto syncScheduledSecs = it.value();
+
+        // We don't want to schedule syncs again for files we have already discovered needing a
+        // scheduled sync, unless the files have been re-locked or had their lock expire time
+        // extended. So we check the time-out of the already set timer with the time-out we
+        // receive from the server entry
+        //
+        // Since the division here is both of ints, we receive a "floor" of the division, so we
+        // are safe from a possible situation where the timer's interval is lower than we need
+        // for the file we are possibly scheduling a sync run for
+
+        if (_filesScheduledForLaterSync.contains(file) &&
+            _filesScheduledForLaterSync.value(file)->interval() / 1000 >= syncScheduledSecs) {
+
+            continue;
+        }
+
+        // Both qint64 so division results in floor-ed result
+        const auto intervalBucketKey = syncScheduledSecs / interval;
+
+        if (!intervalSyncBuckets.contains(intervalBucketKey)) {
+            intervalSyncBuckets.insert(intervalBucketKey, {syncScheduledSecs, {file}});
+            continue;
+        }
+
+        auto bucketValue = intervalSyncBuckets.value(intervalBucketKey);
+        bucketValue.scheduledSyncTimerSecs = qMax(bucketValue.scheduledSyncTimerSecs, syncScheduledSecs);
+        bucketValue.files.append(file);
+        intervalSyncBuckets.insert(intervalBucketKey, bucketValue);
+    }
+
+    return intervalSyncBuckets;
+}
+
+QSharedPointer<SyncEngine::ScheduledSyncTimer> SyncEngine::nearbyScheduledSyncTimer(const qint64 scheduledSyncTimerSecs,
+                                                                                    const qint64 intervalSecs) const
+{
+    const auto scheduledSyncTimerMsecs = scheduledSyncTimerSecs * 1000;
+    const auto halfIntervalMsecs = (intervalSecs * 1000) / 2;
+
+    for (const auto &scheduledTimer : _scheduledSyncTimers) {
+
+        const auto timerRemainingMsecs = scheduledTimer->remainingTime();
+        const auto differenceMsecs = timerRemainingMsecs - scheduledSyncTimerMsecs;
+        const auto nearbyScheduledSync = differenceMsecs > -halfIntervalMsecs &&
+                                         differenceMsecs < halfIntervalMsecs;
+
+        // Iterated timer is going to fire slightly before we need it to for the parameter timer, delay it.
+        if (differenceMsecs > -halfIntervalMsecs && differenceMsecs < 0) {
+
+            const auto scheduledSyncTimerTimeoutMsecs = std::chrono::milliseconds(scheduledSyncTimerMsecs);
+            scheduledTimer->start(scheduledSyncTimerTimeoutMsecs);
+
+            qCDebug(lcEngine) << "Delayed sync timer with remaining time" << timerRemainingMsecs / 1000
+                              << "by" << (differenceMsecs * -1) / 1000
+                              << "seconds due to nearby new sync run needed.";
+        }
+
+        if(nearbyScheduledSync) {
+            return scheduledTimer;
+        }
+    }
+
+    return {};
+}
+
+void SyncEngine::slotCleanupScheduledSyncTimers()
+{
+    qCDebug(lcEngine) << "Beginning scheduled sync timer cleanup.";
+
+    auto it = _scheduledSyncTimers.begin();
+
+    while(it != _scheduledSyncTimers.end()) {
+        const auto &timer = *it;
+        auto eraseTimer = false;
+
+        if(timer && (timer->files.empty() || !timer->isActive())) {
+            qCInfo(lcEngine) << "Stopping and erasing an expired/empty scheduled sync run timer.";
+            timer->stop();
+            eraseTimer = true;
+        } else if (!timer) {
+            qCInfo(lcEngine) << "Erasing a null scheduled sync run timer.";
+            eraseTimer = true;
+        }
+
+        if(eraseTimer) {
+            it = _scheduledSyncTimers.erase(it);
+        } else {
+            ++it;
+        }
+    }
+}
+
+void SyncEngine::slotUnscheduleFilesDelayedSync()
+{
+    if (!_discoveryPhase || _discoveryPhase->_filesUnscheduleSync.empty()) {
+        return;
+    }
+
+    for (const auto &file : _discoveryPhase->_filesUnscheduleSync) {
+        const auto fileSyncRunTimer = _filesScheduledForLaterSync.value(file);
+
+        if (fileSyncRunTimer) {
+            fileSyncRunTimer->files.remove(file);
+
+            // Below is only needed for logging
+            const auto currentMSecsSinceEpoch = QDateTime::currentMSecsSinceEpoch();
+            const auto scheduledSyncTimerMSecs = fileSyncRunTimer->remainingTime();
+            const auto timerExpireDate = QDateTime::fromMSecsSinceEpoch(currentMSecsSinceEpoch + scheduledSyncTimerMSecs);
+            qCInfo(lcEngine) << "Removed" << file << "from sync run timer elapsing at" << timerExpireDate
+                             << "this timer is still running for files:" << fileSyncRunTimer->files;
+        }
+    }
+
+    slotCleanupScheduledSyncTimers();
+}
+
 } // namespace OCC
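To illustrate the bucketing performed by groupNeededScheduledSyncRuns() above, here is a stripped-down, stand-alone sketch; the names Bucket and groupByInterval are invented for the example and do not appear in the PR.

#include <QHash>
#include <QString>
#include <QVector>
#include <QtGlobal>

// Stand-alone illustration of the bucket classification: seconds-until-sync
// divided by the interval (integer division) selects the bucket, and each
// bucket keeps the latest requested time so it covers all of its files.
struct Bucket {
    qint64 scheduledSyncTimerSecs = 0;
    QVector<QString> files;
};

QHash<qint64, Bucket> groupByInterval(const QHash<QString, qint64> &filesNeedingSync, qint64 intervalSecs)
{
    QHash<qint64, Bucket> buckets;
    for (auto it = filesNeedingSync.cbegin(); it != filesNeedingSync.cend(); ++it) {
        const auto key = it.value() / intervalSecs; // floor, both operands are qint64
        auto &bucket = buckets[key];
        bucket.scheduledSyncTimerSecs = qMax(bucket.scheduledSyncTimerSecs, it.value());
        bucket.files.append(it.key());
    }
    return buckets;
}

// With intervalSecs = 60 and files due in 70 s, 95 s and 130 s, the keys are
// 70/60 = 1, 95/60 = 1 and 130/60 = 2: the first two share one timer that fires
// at 95 s, the third gets its own timer at 130 s.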
@@ -213,7 +213,50 @@ private slots:
     void slotInsufficientLocalStorage();
     void slotInsufficientRemoteStorage();
 
+    void slotScheduleFilesDelayedSync();
+    void slotUnscheduleFilesDelayedSync();
+    void slotCleanupScheduledSyncTimers();
+
 private:
+    // Some files need a sync run to be executed at a specified time after
+    // their status is scheduled to change (e.g. lock status will expire in
+    // 20 minutes.)
+    //
+    // Rather than execute a sync run for each file that needs one, we want
+    // to schedule as few sync runs as possible, trying to have the state of
+    // these files updated in a timely manner without scheduling runs too
+    // frequently. We can therefore group files into a bucket.
+    //
+    // A bucket contains a group of files requiring a sync run in close
+    // proximity to each other, with an assigned sync timer interval that can
+    // be used to schedule a sync run which will update all the files in the
+    // bucket at the time their state is scheduled to change.
+    //
+    // In the pair, first is the actual time at which the bucket is going to
+    // have its sync scheduled. Second is the vector of all the (paths of)
+    // files that fall into this bucket.
+    //
+    // See SyncEngine::groupNeededScheduledSyncRuns and
+    // SyncEngine::slotScheduleFilesDelayedSync for usage.
+    struct ScheduledSyncBucket {
+        qint64 scheduledSyncTimerSecs;
+        QVector<QString> files;
+    };
+
+    // Sometimes we schedule a timer for, say, 10 files. But we receive updated
+    // data from an earlier sync run and we no longer need a scheduled sync.
+    //
+    // E.g. we had a scheduled sync timer going for a file with a lock state
+    // scheduled to expire, but someone already unlocked the file on the web UI
+    //
+    // By keeping a counter of the files depending on this timer we can
+    // perform "garbage collection", by killing the timer if there are no
+    // longer any files depending on the scheduled sync run.
+    class ScheduledSyncTimer : public QTimer {
+    public:
+        QSet<QString> files;
+    };
+
     bool checkErrorBlacklisting(SyncFileItem &item);
 
     // Cleans up unnecessary downloadinfo entries in the journal as well
@@ -232,6 +275,24 @@ private:
     // cleanup and emit the finished signal
     void finalize(bool success);
 
+    // Aggregate scheduled sync runs into interval buckets. Can be used to
+    // schedule a sync run per bucket instead of per file, reducing load.
+    //
+    // Bucket classification is done by simply dividing the seconds until
+    // scheduled sync time by the interval (note -- integer division!)
+    QHash<qint64, ScheduledSyncBucket> groupNeededScheduledSyncRuns(const qint64 interval) const;
+
+    // Checks if there is already a scheduled sync run timer active near the
+    // time provided as the parameter.
+    //
+    // If this timer will expire within the interval provided, the return is
+    // true.
+    //
+    // If this expiration occurs before the scheduled sync run provided as the
+    // parameter, it is rescheduled to expire at the time of the parameter.
+    QSharedPointer<SyncEngine::ScheduledSyncTimer> nearbyScheduledSyncTimer(const qint64 scheduledSyncTimerSecs,
+                                                                            const qint64 intervalSecs) const;
+
     static bool s_anySyncRunning; //true when one sync is running somewhere (for debugging)
 
     // Must only be acessed during update and reconcile
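A quick numeric reading of the nearbyScheduledSyncTimer() contract declared above, with made-up values and assuming the 60-second interval used in slotScheduleFilesDelayedSync():

// With intervalSecs = 60 the acceptance window is +/- 30 s around the requested time.
// Existing timer fires in 100 s, new request asks for 120 s:
const qint64 halfIntervalMsecs = (60 * 1000) / 2;   // 30000
const qint64 differenceMsecs = (100 - 120) * 1000;  // -20000, inside the window
const bool nearby = differenceMsecs > -halfIntervalMsecs && differenceMsecs < halfIntervalMsecs; // true
// Because the difference is negative, the existing timer would also be restarted,
// so it fires at the later 120 s mark and covers both requests.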
@@ -303,6 +364,16 @@ private:
     std::set<QString> _localDiscoveryPaths;
 
     QStringList _leadingAndTrailingSpacesFilesAllowed;
+
+    // Hash of files we have scheduled for later sync runs, along with a
+    // pointer to the timer which will trigger the sync run for it.
+    //
+    // NOTE: these sync timers are not unique and will likely be shared
+    // between several files
+    QHash<QString, QSharedPointer<ScheduledSyncTimer>> _filesScheduledForLaterSync;
+
+    // A vector of all the (unique) scheduled sync timers
+    QVector<QSharedPointer<ScheduledSyncTimer>> _scheduledSyncTimers;
 };
 }
 
@@ -620,14 +620,16 @@ private slots:
         const QString barFileAaaSubFolder("aaa/subfolder/bar");
 
         fakeFolder.remoteModifier().insert(fooFileRootFolder);
 
         fakeFolder.remoteModifier().insert(barFileRootFolder);
-        fakeFolder.remoteModifier().find("bar")->extraDavProperties = "<nc:lock>1</nc:lock>"
-                                                                      "<nc:lock-owner-type>0</nc:lock-owner-type>"
-                                                                      "<nc:lock-owner>user1</nc:lock-owner>"
-                                                                      "<nc:lock-owner-displayname>user1</nc:lock-owner-displayname>"
-                                                                      "<nc:lock-owner-editor>user1</nc:lock-owner-editor>"
-                                                                      "<nc:lock-time>1648046707</nc:lock-time>";
+        const auto lockedFileDavProps = QByteArray("<nc:lock>1</nc:lock>"
+                                                   "<nc:lock-owner-type>0</nc:lock-owner-type>"
+                                                   "<nc:lock-owner>user1</nc:lock-owner>"
+                                                   "<nc:lock-owner-displayname>user1</nc:lock-owner-displayname>"
+                                                   "<nc:lock-owner-editor>user1</nc:lock-owner-editor>"
+                                                   "<nc:lock-time>1648046707</nc:lock-time>");
+
+        fakeFolder.remoteModifier().find("bar")->extraDavProperties = lockedFileDavProps;
 
         fakeFolder.remoteModifier().mkdir(QStringLiteral("subfolder"));
         fakeFolder.remoteModifier().insert(fooFileSubFolder);
@@ -637,12 +639,26 @@ private slots:
         fakeFolder.remoteModifier().insert(fooFileAaaSubFolder);
         fakeFolder.remoteModifier().insert(barFileAaaSubFolder);
 
-        QVERIFY(fakeFolder.syncOnce());
+        ItemCompletedSpy completeSpy(fakeFolder);
 
-        fakeFolder.remoteModifier().find("bar")->extraDavProperties = "<nc:lock>0</nc:lock>";
+        completeSpy.clear();
+        QVERIFY(fakeFolder.syncOnce());
+        QCOMPARE(completeSpy.findItem("bar")->_locked, OCC::SyncFileItem::LockStatus::LockedItem);
+        SyncJournalFileRecord fileRecordBefore;
+        QVERIFY(fakeFolder.syncJournal().getFileRecord(QStringLiteral("bar"), &fileRecordBefore));
+        QVERIFY(fileRecordBefore._lockstate._locked);
+
+        const auto unlockedFileDavProps = QByteArray("<nc:lock>0</nc:lock>");
+        fakeFolder.remoteModifier().find("bar")->extraDavProperties = unlockedFileDavProps;
+
         fakeFolder.syncEngine().setLocalDiscoveryOptions(LocalDiscoveryStyle::DatabaseAndFilesystem);
 
+        completeSpy.clear();
         QVERIFY(fakeFolder.syncOnce());
+        QCOMPARE(completeSpy.findItem("bar")->_locked, OCC::SyncFileItem::LockStatus::UnlockedItem);
+        SyncJournalFileRecord fileRecordAfter;
+        QVERIFY(fakeFolder.syncJournal().getFileRecord(QStringLiteral("bar"), &fileRecordAfter));
+        QVERIFY(!fileRecordAfter._lockstate._locked);
     }
 };