SyncEngine: Use QSet for the seenFiles rather than QHash.

We can save some memory here as the seenFiles list can be long.
Klaas Freitag 2014-06-17 16:29:38 +02:00
parent b91967f4d9
commit b71881d300
4 changed files with 6 additions and 4 deletions
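For illustration only (this sketch is not part of the commit, and the variable names are made up): the old code used a QHash<QString, QString> purely as a set, storing an empty QString as the value for every key, whereas a QSet<QString> keeps just the keys and so avoids one QString value per entry. A minimal sketch of the two patterns:

    #include <QHash>
    #include <QSet>
    #include <QString>
    #include <QDebug>

    int main()
    {
        const QString path = QString::fromLatin1("photos/2014/holiday.jpg");

        // Old pattern: QHash used as a set; every entry still stores an
        // (empty) QString value alongside the key.
        QHash<QString, QString> seenHash;
        seenHash[path] = QString();

        // New pattern: QSet keeps only the keys, which is enough for
        // "have we seen this file?" membership checks.
        QSet<QString> seenSet;
        seenSet.insert(path);

        qDebug() << seenHash.contains(path) << seenSet.contains(path); // true true
        return 0;
    }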

syncengine.cpp

@@ -265,7 +265,7 @@ int SyncEngine::treewalkFile( TREE_WALK_FILE *file, bool remote )
     item._fileId = file->file_id;
     // record the seen files to be able to clean the journal later
-    _seenFiles[item._file] = QString();
+    _seenFiles.insert(item._file);
     switch(file->error_status) {
     case CSYNC_STATUS_OK:
@@ -655,6 +655,7 @@ void SyncEngine::slotFinished()
     if( ! _journal->postSyncCleanup( _seenFiles ) ) {
         qDebug() << "Cleaning of synced ";
     }
     _journal->commit("All Finished.", false);
     emit treeWalkResult(_syncedItems);
     finalize();

syncengine.h

@@ -21,6 +21,7 @@
 #include <QMutex>
 #include <QThread>
 #include <QString>
+#include <QSet>
 #include <qelapsedtimer.h>
 #include <csync.h>
@@ -115,7 +116,7 @@ private:
     SyncJournalDb *_journal;
     QScopedPointer <OwncloudPropagator> _propagator;
     QString _lastDeleted; // if the last item was a path and it has been deleted
-    QHash <QString, QString> _seenFiles;
+    QSet<QString> _seenFiles;
     QThread _thread;
     Progress::Info _progressInfo;

syncjournaldb.cpp

@@ -491,7 +491,7 @@ SyncJournalFileRecord SyncJournalDb::getFileRecord( const QString& filename )
     return rec;
 }
-bool SyncJournalDb::postSyncCleanup(const QHash<QString, QString> &items )
+bool SyncJournalDb::postSyncCleanup(const QSet<QString> &items )
 {
     QMutexLocker locker(&_mutex);

syncjournaldb.h

@@ -79,7 +79,7 @@ public:
      */
     void avoidReadFromDbOnNextSync(const QString& fileName);
-    bool postSyncCleanup( const QHash<QString, QString>& items );
+    bool postSyncCleanup( const QSet<QString>& items );
     /* Because sqlite transactions is really slow, we encapsulate everything in big transactions
      * Commit will actually commit the transaction and create a new one.