Sync: Fix sync of deletions during 503. #2894

Christian Kamm 2015-04-08 10:50:08 +02:00
parent 5a83636f81
commit d986011067
4 changed files with 31 additions and 5 deletions

View file: syncengine.cpp

@@ -384,6 +384,7 @@ int SyncEngine::treewalkFile( TREE_WALK_FILE *file, bool remote )
case CSYNC_STATUS_STORAGE_UNAVAILABLE:
item._errorString = QLatin1String("Directory temporarily not available on server.");
item._status = SyncFileItem::SoftError;
+ _temporarilyUnavailablePaths.insert(item._file);
break;
default:
Q_ASSERT("Non handled error-status");
@@ -700,6 +701,7 @@ void SyncEngine::slotDiscoveryJobFinished(int discoveryResult)
_hasRemoveFile = false;
bool walkOk = true;
_seenFiles.clear();
+ _temporarilyUnavailablePaths.clear();
if( csync_walk_local_tree(_csync_ctx, &treewalkLocal, 0) < 0 ) {
qDebug() << "Error in local treewalk.";
@@ -863,7 +865,7 @@ void SyncEngine::slotFinished()
_anotherSyncNeeded = _anotherSyncNeeded || _propagator->_anotherSyncNeeded;
// emit the treewalk results.
- if( ! _journal->postSyncCleanup( _seenFiles ) ) {
+ if( ! _journal->postSyncCleanup( _seenFiles, _temporarilyUnavailablePaths ) ) {
qDebug() << "Cleaning of synced ";
}

View file: syncengine.h

@@ -159,7 +159,21 @@ private:
QPointer<DiscoveryMainThread> _discoveryMainThread;
QSharedPointer <OwncloudPropagator> _propagator;
QString _lastDeleted; // if the last item was a path and it has been deleted
+ // After a sync, only the syncdb entries whose filenames appear in this
+ // set will be kept. See _temporarilyUnavailablePaths.
QSet<QString> _seenFiles;
+ // Some paths might be temporarily unavailable on the server, for
+ // example due to 503 Storage not available. Deleting information
+ // about the files from the database in these cases would lead to
+ // incorrect synchronization.
+ // Therefore all syncdb entries whose filename starts with one of
+ // the paths in this set will be kept.
+ // The specific case that fails otherwise is deleting a local file
+ // while the remote says storage not available.
+ QSet<QString> _temporarilyUnavailablePaths;
QThread _thread;
Progress::Info _progressInfo;
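
The keep rule described in the comment above can be illustrated with a small standalone sketch. This is not part of the commit: the helper shouldKeep() and the sample paths are invented for illustration, and the real logic is the loop added to SyncJournalDb::postSyncCleanup() further down in this diff.

// Illustrative sketch only (not part of this commit): shouldKeep() mirrors the
// keep decision that postSyncCleanup() applies to each syncdb row.
#include <QSet>
#include <QString>
#include <QDebug>

static bool shouldKeep(const QString &file,
                       const QSet<QString> &filepathsToKeep,
                       const QSet<QString> &prefixesToKeep)
{
    if (filepathsToKeep.contains(file))          // file was walked in this sync run
        return true;
    foreach (const QString &prefix, prefixesToKeep) {
        if (file.startsWith(prefix))             // file lives under a 503'd path
            return true;
    }
    return false;
}

int main()
{
    QSet<QString> seen;
    seen << "photos/img.jpg";                    // walked normally this run
    QSet<QString> unavailable;
    unavailable << "docs/";                      // server answered 503 for docs/

    qDebug() << shouldKeep("photos/img.jpg", seen, unavailable);  // true: seen
    qDebug() << shouldKeep("docs/report.txt", seen, unavailable); // true: under unavailable prefix
    qDebug() << shouldKeep("gone/stale.txt", seen, unavailable);  // false: record can be purged
    return 0;
}

Keeping the record for docs/report.txt even though it was not seen in this run is the point of the change: purging it while docs/ answers 503 is what previously led to incorrect synchronization of local deletions.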

View file: syncjournaldb.cpp

@@ -698,7 +698,8 @@ SyncJournalFileRecord SyncJournalDb::getFileRecord( const QString& filename )
return rec;
}
- bool SyncJournalDb::postSyncCleanup(const QSet<QString> &items )
+ bool SyncJournalDb::postSyncCleanup(const QSet<QString>& filepathsToKeep,
+ const QSet<QString>& prefixesToKeep)
{
QMutexLocker locker(&_mutex);
@@ -719,8 +720,16 @@ bool SyncJournalDb::postSyncCleanup(const QSet<QString> &items )
while(query.next()) {
const QString file = query.stringValue(1);
- bool contained = items.contains(file);
- if( !contained ) {
+ bool keep = filepathsToKeep.contains(file);
+ if( !keep ) {
+ foreach( const QString & prefix, prefixesToKeep ) {
+ if( file.startsWith(prefix) ) {
+ keep = true;
+ break;
+ }
+ }
+ }
+ if( !keep ) {
superfluousItems.append(query.stringValue(0));
}
}

View file: syncjournaldb.h

@@ -97,7 +97,8 @@ public:
*/
void avoidReadFromDbOnNextSync(const QString& fileName);
- bool postSyncCleanup( const QSet<QString>& items );
+ bool postSyncCleanup(const QSet<QString>& filepathsToKeep,
+ const QSet<QString>& prefixesToKeep);
/* Because sqlite transactions is really slow, we encapsulate everything in big transactions
* Commit will actually commit the transaction and create a new one.