Merge pull request #2268 from nextcloud/cherry_pick_testchunkingng_related_improvements

Cherry pick TestChunkingNG related improvements
commit 4117ac2913
Kevin Ottens, 2020-08-13 17:26:33 +02:00, committed by GitHub
5 changed files with 184 additions and 21 deletions


@@ -343,7 +343,9 @@ static void _csync_merge_algorithm_visitor(csync_file_stat_t *cur, CSYNC * ctx)
auto remoteNode = ctx->current == REMOTE_REPLICA ? cur : other;
auto localNode = ctx->current == REMOTE_REPLICA ? other : cur;
remoteNode->instruction = CSYNC_INSTRUCTION_NONE;
-localNode->instruction = up._modtime == localNode->modtime ? CSYNC_INSTRUCTION_UPDATE_METADATA : CSYNC_INSTRUCTION_SYNC;
+localNode->instruction = up._modtime == localNode->modtime && up._size == localNode->size ?
+CSYNC_INSTRUCTION_UPDATE_METADATA : CSYNC_INSTRUCTION_SYNC;
// Update the etag and other server metadata in the journal already
// (We can't use a typical CSYNC_INSTRUCTION_UPDATE_METADATA because
// we must not store the size/modtime from the file system)
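The hunk above tightens the reconcile rule: a matching modtime alone no longer proves the local file is unchanged; the recorded size has to match too, otherwise the file is re-synced instead of merely having its metadata refreshed. A minimal, self-contained sketch of that decision, using stand-in types rather than the client's real csync structs:

    #include <cassert>
    #include <cstdint>

    enum class Instruction { UpdateMetadata, Sync };

    // Stand-ins for the journal record and the on-disk state.
    struct State {
        int64_t modtime = 0;
        int64_t size = 0;
    };

    // Only treat the file as unchanged when BOTH modtime and size match.
    Instruction reconcileLocal(const State &journal, const State &local)
    {
        return (journal.modtime == local.modtime && journal.size == local.size)
            ? Instruction::UpdateMetadata // same content: refresh metadata only
            : Instruction::Sync;          // content drifted: upload again
    }

    int main()
    {
        assert(reconcileLocal({10, 100}, {10, 100}) == Instruction::UpdateMetadata);
        assert(reconcileLocal({10, 100}, {10, 150}) == Instruction::Sync); // size differs
        return 0;
    }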


@@ -83,7 +83,8 @@ void PropagateUploadFileNG::doStartUpload()
propagator()->_activeJobList.append(this);
const SyncJournalDb::UploadInfo progressInfo = propagator()->_journal->getUploadInfo(_item->_file);
-if (progressInfo._valid && progressInfo.isChunked() && progressInfo._modtime == _item->_modtime) {
+if (progressInfo._valid && progressInfo.isChunked() && progressInfo._modtime == _item->_modtime
+&& progressInfo._size == qint64(_item->_size)) {
_transferId = progressInfo._transferid;
auto url = chunkUrl();
auto job = new LsColJob(propagator()->account(), url, this);
@@ -143,6 +144,12 @@ void PropagateUploadFileNG::slotPropfindFinished()
qCCritical(lcPropagateUpload) << "Inconsistency while resuming " << _item->_file
<< ": the size on the server (" << _sent << ") is bigger than the size of the file ("
<< _fileToUpload._size << ")";
+// Wipe the old chunking data.
+// Fire and forget. Any error will be ignored.
+(new DeleteJob(propagator()->account(), chunkUrl(), this))->start();
+propagator()->_activeJobList.append(this);
startNewUpload();
return;
}
@@ -231,6 +238,7 @@ void PropagateUploadFileNG::startNewUpload()
pi._transferid = _transferId;
pi._modtime = _item->_modtime;
pi._contentChecksum = _item->_checksumHeader;
+pi._size = _item->_size;
propagator()->_journal->setUploadInfo(_item->_file, pi);
propagator()->_journal->commit("Upload info");
QMap<QByteArray, QByteArray> headers;
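The new error path above covers a stored chunk directory that claims more bytes than the file has: resuming is impossible, so the stale chunks are wiped (fire and forget) and a fresh upload is started. A reduced sketch of that control flow, with stand-in functions in place of the real DeleteJob/propagator machinery:

    #include <cstdint>
    #include <cstdio>

    // Stand-ins; the real code fires a DeleteJob on the remote chunk
    // directory and deliberately ignores its result.
    static void deleteRemoteChunkDir() { std::puts("DELETE chunk dir (fire and forget)"); }
    static void startNewUpload()       { std::puts("start a fresh upload"); }
    static void resumeUpload()         { std::puts("resume at the server's offset"); }

    // sentOnServer: sum of the chunk sizes the PROPFIND reported.
    void continueOrRestart(int64_t sentOnServer, int64_t fileSize)
    {
        if (sentOnServer > fileSize) {
            // More data on the server than in the file: these chunks cannot
            // belong to the current file contents. Wipe them and start over.
            deleteRemoteChunkDir();
            startNewUpload();
            return;
        }
        resumeUpload();
    }

    int main()
    {
        continueOrRestart(5000000, 4000000); // inconsistent: wipe and restart
        continueOrRestart(2000000, 4000000); // consistent: resume
        return 0;
    }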


@@ -43,7 +43,7 @@ void PropagateUploadFileV1::doStartUpload()
const SyncJournalDb::UploadInfo progressInfo = propagator()->_journal->getUploadInfo(_item->_file);
-if (progressInfo._valid && progressInfo.isChunked() && progressInfo._modtime == _item->_modtime
+if (progressInfo._valid && progressInfo.isChunked() && progressInfo._modtime == _item->_modtime && progressInfo._size == qint64(_item->_size)
&& (progressInfo._contentChecksum == _item->_checksumHeader || progressInfo._contentChecksum.isEmpty() || _item->_checksumHeader.isEmpty())) {
_startChunk = progressInfo._chunk;
_transferId = progressInfo._transferid;
@@ -59,6 +59,7 @@ void PropagateUploadFileV1::doStartUpload()
pi._modtime = _item->_modtime;
pi._errorCount = 0;
pi._contentChecksum = _item->_checksumHeader;
+pi._size = _item->_size;
propagator()->_journal->setUploadInfo(_item->_file, pi);
propagator()->_journal->commit("Upload info");
}
@@ -286,6 +287,7 @@ void PropagateUploadFileV1::slotPutFinished()
pi._modtime = _item->_modtime;
pi._errorCount = 0; // successful chunk upload resets
pi._contentChecksum = _item->_checksumHeader;
+pi._size = _item->_size;
propagator()->_journal->setUploadInfo(_item->_file, pi);
propagator()->_journal->commit("Upload info");
startNextChunk();
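Taken together, the NG and V1 hunks give both upload paths the same resume test: a stored transfer is only picked up again if the journal entry is valid, chunked, and its recorded modtime and size still match the file on disk. A hypothetical standalone version of that predicate (this UploadInfo is a stand-in mirroring the fields in the diff, not the real SyncJournalDb type):

    #include <cassert>
    #include <cstdint>

    // Stand-in for SyncJournalDb::UploadInfo; field names mirror the diff above.
    struct UploadInfo {
        bool valid = false;
        int64_t transferid = 0; // 0 means no chunked transfer is stored
        int64_t modtime = 0;
        int64_t size = 0;
        bool isChunked() const { return transferid != 0; }
    };

    // Resume only when the journal entry is intact AND still describes the
    // exact file we are about to upload; any drift means "start over".
    bool canResume(const UploadInfo &info, int64_t fileModtime, int64_t fileSize)
    {
        return info.valid && info.isChunked()
            && info.modtime == fileModtime
            && info.size == fileSize;
    }

    int main()
    {
        const UploadInfo stored{true, 42, 1000, 500};
        assert(canResume(stored, 1000, 500));  // unchanged file: resume
        assert(!canResume(stored, 1000, 501)); // size drifted: restart
        return 0;
    }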


@@ -40,6 +40,15 @@ static void partialUpload(FakeFolder &fakeFolder, const QString &name, int size)
[](int s, const FileInfo &i) { return s + i.size; }));
}
+// Reduce max chunk size a bit so we get more chunks
+static void setChunkSize(SyncEngine &engine, quint64 size)
+{
+SyncOptions options;
+options._maxChunkSize = size;
+options._initialChunkSize = size;
+options._minChunkSize = size;
+engine.setSyncOptions(options);
+}
class TestChunkingNG : public QObject
{
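The helper above pins all three chunk-size knobs to one value, which is what lets the tests below shrink their files from hundreds of MB to a few MB while still producing several chunks. Typical use, as in the tests that follow:

    // Force roughly ten 1 MB chunks for a 10 MB test file.
    setChunkSize(fakeFolder.syncEngine(), 1 * 1000 * 1000);
    const int size = 10 * 1000 * 1000; // 10 MB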
@@ -50,7 +59,9 @@ private slots:
void testFileUpload() {
FakeFolder fakeFolder{FileInfo::A12_B12_C12_S12()};
fakeFolder.syncEngine().account()->setCapabilities({ { "dav", QVariantMap{ {"chunking", "1.0"} } } });
-const int size = 300 * 1000 * 1000; // 300 MB
+setChunkSize(fakeFolder.syncEngine(), 1 * 1000 * 1000);
+const int size = 10 * 1000 * 1000; // 10 MB
fakeFolder.localModifier().insert("A/a0", size);
QVERIFY(fakeFolder.syncOnce());
QCOMPARE(fakeFolder.currentLocalState(), fakeFolder.currentRemoteState());
@@ -64,25 +75,29 @@ private slots:
QCOMPARE(fakeFolder.uploadState().children.count(), 2); // the transfer was done with chunking
}
-void testResume () {
+// Test resuming when there's a confusing chunk added
+void testResume1() {
FakeFolder fakeFolder{FileInfo::A12_B12_C12_S12()};
fakeFolder.syncEngine().account()->setCapabilities({ { "dav", QVariantMap{ {"chunking", "1.0"} } } });
-const int size = 300 * 1000 * 1000; // 300 MB
+const int size = 10 * 1000 * 1000; // 10 MB
+setChunkSize(fakeFolder.syncEngine(), 1 * 1000 * 1000);
partialUpload(fakeFolder, "A/a0", size);
QCOMPARE(fakeFolder.uploadState().children.count(), 1);
auto chunkingId = fakeFolder.uploadState().children.first().name;
const auto &chunkMap = fakeFolder.uploadState().children.first().children;
quint64 uploadedSize = std::accumulate(chunkMap.begin(), chunkMap.end(), 0LL, [](quint64 s, const FileInfo &f) { return s + f.size; });
-QVERIFY(uploadedSize > 50 * 1000 * 1000); // at least 50 MB
+QVERIFY(uploadedSize > 2 * 1000 * 1000); // at least 2 MB
-// Add a fake file to make sure it gets deleted
+// Add a fake chunk to make sure it gets deleted
fakeFolder.uploadState().children.first().insert("10000", size);
fakeFolder.setServerOverride([&](QNetworkAccessManager::Operation op, const QNetworkRequest &request, QIODevice *) -> QNetworkReply * {
if (op == QNetworkAccessManager::PutOperation) {
// Test that we properly resume and are not sending past data again.
Q_ASSERT(request.rawHeader("OC-Chunk-Offset").toULongLong() >= uploadedSize);
+} else if (op == QNetworkAccessManager::DeleteOperation) {
+Q_ASSERT(request.url().path().endsWith("/10000"));
}
return nullptr;
});
@@ -96,19 +111,147 @@ private slots:
QCOMPARE(fakeFolder.uploadState().children.first().name, chunkingId);
}
+// Test resuming when one of the uploaded chunks got removed
+void testResume2() {
+FakeFolder fakeFolder{FileInfo::A12_B12_C12_S12()};
+fakeFolder.syncEngine().account()->setCapabilities({ { "dav", QVariantMap{ {"chunking", "1.0"} } } });
+setChunkSize(fakeFolder.syncEngine(), 1 * 1000 * 1000);
+const int size = 30 * 1000 * 1000; // 30 MB
+partialUpload(fakeFolder, "A/a0", size);
+QCOMPARE(fakeFolder.uploadState().children.count(), 1);
+auto chunkingId = fakeFolder.uploadState().children.first().name;
+const auto &chunkMap = fakeFolder.uploadState().children.first().children;
+quint64 uploadedSize = std::accumulate(chunkMap.begin(), chunkMap.end(), 0LL, [](quint64 s, const FileInfo &f) { return s + f.size; });
+QVERIFY(uploadedSize > 2 * 1000 * 1000); // at least 2 MB
+QVERIFY(chunkMap.size() >= 3); // at least three chunks
+QStringList chunksToDelete;
+// Remove the second chunk, so all further chunks will be deleted and resent
+auto firstChunk = chunkMap.first();
+auto secondChunk = *(chunkMap.begin() + 1);
+for (const auto& name : chunkMap.keys().mid(2)) {
+chunksToDelete.append(name);
+}
+fakeFolder.uploadState().children.first().remove(secondChunk.name);
+QStringList deletedPaths;
+fakeFolder.setServerOverride([&](QNetworkAccessManager::Operation op, const QNetworkRequest &request, QIODevice *) -> QNetworkReply * {
+if (op == QNetworkAccessManager::PutOperation) {
+// Test that we properly resume, not resending the first chunk
+Q_ASSERT(request.rawHeader("OC-Chunk-Offset").toLongLong() >= firstChunk.size);
+} else if (op == QNetworkAccessManager::DeleteOperation) {
+deletedPaths.append(request.url().path());
+}
+return nullptr;
+});
+QVERIFY(fakeFolder.syncOnce());
+for (const auto& toDelete : chunksToDelete) {
+bool wasDeleted = false;
+for (const auto& deleted : deletedPaths) {
+if (deleted.mid(deleted.lastIndexOf('/') + 1) == toDelete) {
+wasDeleted = true;
+break;
+}
+}
+QVERIFY(wasDeleted);
+}
+QCOMPARE(fakeFolder.currentLocalState(), fakeFolder.currentRemoteState());
+QCOMPARE(fakeFolder.currentRemoteState().find("A/a0")->size, size);
+// The same chunk id was re-used
+QCOMPARE(fakeFolder.uploadState().children.count(), 1);
+QCOMPARE(fakeFolder.uploadState().children.first().name, chunkingId);
+}
+// Test resuming when all chunks are already present
+void testResume3() {
+FakeFolder fakeFolder{FileInfo::A12_B12_C12_S12()};
+fakeFolder.syncEngine().account()->setCapabilities({ { "dav", QVariantMap{ {"chunking", "1.0"} } } });
+const int size = 30 * 1000 * 1000; // 30 MB
+setChunkSize(fakeFolder.syncEngine(), 1 * 1000 * 1000);
+partialUpload(fakeFolder, "A/a0", size);
+QCOMPARE(fakeFolder.uploadState().children.count(), 1);
+auto chunkingId = fakeFolder.uploadState().children.first().name;
+const auto &chunkMap = fakeFolder.uploadState().children.first().children;
+quint64 uploadedSize = std::accumulate(chunkMap.begin(), chunkMap.end(), 0LL, [](quint64 s, const FileInfo &f) { return s + f.size; });
+QVERIFY(uploadedSize > 5 * 1000 * 1000); // at least 5 MB
+// Add a chunk that makes the file completely uploaded
+fakeFolder.uploadState().children.first().insert(
+QString::number(chunkMap.size()).rightJustified(8, '0'), size - uploadedSize);
+bool sawPut = false;
+bool sawDelete = false;
+bool sawMove = false;
+fakeFolder.setServerOverride([&](QNetworkAccessManager::Operation op, const QNetworkRequest &request, QIODevice *) -> QNetworkReply * {
+if (op == QNetworkAccessManager::PutOperation) {
+sawPut = true;
+} else if (op == QNetworkAccessManager::DeleteOperation) {
+sawDelete = true;
+} else if (request.attribute(QNetworkRequest::CustomVerbAttribute) == "MOVE") {
+sawMove = true;
+}
+return nullptr;
+});
+QVERIFY(fakeFolder.syncOnce());
+QVERIFY(sawMove);
+QVERIFY(!sawPut);
+QVERIFY(!sawDelete);
+QCOMPARE(fakeFolder.currentLocalState(), fakeFolder.currentRemoteState());
+QCOMPARE(fakeFolder.currentRemoteState().find("A/a0")->size, size);
+// The same chunk id was re-used
+QCOMPARE(fakeFolder.uploadState().children.count(), 1);
+QCOMPARE(fakeFolder.uploadState().children.first().name, chunkingId);
+}
+// Test resuming (or rather not resuming!) for the error case of the sum of
+// chunk sizes being larger than the file size
+void testResume4() {
+FakeFolder fakeFolder{FileInfo::A12_B12_C12_S12()};
+fakeFolder.syncEngine().account()->setCapabilities({ { "dav", QVariantMap{ {"chunking", "1.0"} } } });
+const int size = 30 * 1000 * 1000; // 30 MB
+setChunkSize(fakeFolder.syncEngine(), 1 * 1000 * 1000);
+partialUpload(fakeFolder, "A/a0", size);
+QCOMPARE(fakeFolder.uploadState().children.count(), 1);
+auto chunkingId = fakeFolder.uploadState().children.first().name;
+const auto &chunkMap = fakeFolder.uploadState().children.first().children;
+quint64 uploadedSize = std::accumulate(chunkMap.begin(), chunkMap.end(), 0LL, [](quint64 s, const FileInfo &f) { return s + f.size; });
+QVERIFY(uploadedSize > 5 * 1000 * 1000); // at least 5 MB
+// Add a chunk that makes the file more than completely uploaded
+fakeFolder.uploadState().children.first().insert(
+QString::number(chunkMap.size()).rightJustified(8, '0'), size - uploadedSize + 100);
+QVERIFY(fakeFolder.syncOnce());
+QCOMPARE(fakeFolder.currentLocalState(), fakeFolder.currentRemoteState());
+QCOMPARE(fakeFolder.currentRemoteState().find("A/a0")->size, size);
+// Used a new transfer id but wiped the old one
+QCOMPARE(fakeFolder.uploadState().children.count(), 1);
+QVERIFY(fakeFolder.uploadState().children.first().name != chunkingId);
+}
// Check what happens when we abort during the final MOVE and
// the final MOVE takes longer than the abort-delay
void testLateAbortHard()
{
FakeFolder fakeFolder{ FileInfo::A12_B12_C12_S12() };
fakeFolder.syncEngine().account()->setCapabilities({ { "dav", QVariantMap{ { "chunking", "1.0" } } }, { "checksums", QVariantMap{ { "supportedTypes", QStringList() << "SHA1" } } } });
-const int size = 150 * 1000 * 1000;
+const int size = 15 * 1000 * 1000; // 15 MB
+setChunkSize(fakeFolder.syncEngine(), 1 * 1000 * 1000);
// Make the MOVE never reply, but trigger a client-abort and apply the change remotely
auto parent = new QObject;
QByteArray moveChecksumHeader;
int nGET = 0;
-int responseDelay = 10000; // bigger than abort-wait timeout
+int responseDelay = 100000; // bigger than abort-wait timeout
fakeFolder.setServerOverride([&](QNetworkAccessManager::Operation op, const QNetworkRequest &request, QIODevice *) -> QNetworkReply * {
if (request.attribute(QNetworkRequest::CustomVerbAttribute) == "MOVE") {
QTimer::singleShot(50, parent, [&]() { fakeFolder.syncEngine().abort(); });
@@ -185,11 +328,12 @@ private slots:
{
FakeFolder fakeFolder{ FileInfo::A12_B12_C12_S12() };
fakeFolder.syncEngine().account()->setCapabilities({ { "dav", QVariantMap{ { "chunking", "1.0" } } }, { "checksums", QVariantMap{ { "supportedTypes", QStringList() << "SHA1" } } } });
-const int size = 150 * 1000 * 1000;
+const int size = 15 * 1000 * 1000; // 15 MB
+setChunkSize(fakeFolder.syncEngine(), 1 * 1000 * 1000);
// Make the MOVE never reply, but trigger a client-abort and apply the change remotely
auto parent = new QObject;
-int responseDelay = 2000; // smaller than abort-wait timeout
+int responseDelay = 200; // smaller than abort-wait timeout
fakeFolder.setServerOverride([&](QNetworkAccessManager::Operation op, const QNetworkRequest &request, QIODevice *) -> QNetworkReply * {
if (request.attribute(QNetworkRequest::CustomVerbAttribute) == "MOVE") {
QTimer::singleShot(50, parent, [&]() { fakeFolder.syncEngine().abort(); });
@@ -198,7 +342,6 @@ private slots:
return nullptr;
});
-// Test 1: NEW file aborted
fakeFolder.localModifier().insert("A/a0", size);
QVERIFY(fakeFolder.syncOnce());
@@ -215,7 +358,9 @@ private slots:
FakeFolder fakeFolder{FileInfo::A12_B12_C12_S12()};
fakeFolder.syncEngine().account()->setCapabilities({ { "dav", QVariantMap{ {"chunking", "1.0"} } } });
-const int size = 300 * 1000 * 1000; // 300 MB
+const int size = 10 * 1000 * 1000; // 10 MB
+setChunkSize(fakeFolder.syncEngine(), 1 * 1000 * 1000);
partialUpload(fakeFolder, "A/a0", size);
QCOMPARE(fakeFolder.uploadState().children.count(), 1);
auto chunkingId = fakeFolder.uploadState().children.first().name;
@@ -238,7 +383,9 @@ private slots:
FakeFolder fakeFolder{FileInfo::A12_B12_C12_S12()};
fakeFolder.syncEngine().account()->setCapabilities({ { "dav", QVariantMap{ {"chunking", "1.0"} } } });
-const int size = 300 * 1000 * 1000; // 300 MB
+const int size = 10 * 1000 * 1000; // 10 MB
+setChunkSize(fakeFolder.syncEngine(), 1 * 1000 * 1000);
partialUpload(fakeFolder, "A/a0", size);
QCOMPARE(fakeFolder.uploadState().children.count(), 1);
@@ -252,7 +399,8 @@ private slots:
void testCreateConflictWhileSyncing() {
FakeFolder fakeFolder{FileInfo::A12_B12_C12_S12()};
fakeFolder.syncEngine().account()->setCapabilities({ { "dav", QVariantMap{ {"chunking", "1.0"} } } });
-const int size = 150 * 1000 * 1000; // 150 MB
+const int size = 10 * 1000 * 1000; // 10 MB
+setChunkSize(fakeFolder.syncEngine(), 1 * 1000 * 1000);
// Put a file on the server and download it.
fakeFolder.remoteModifier().insert("A/a0", size);
@@ -307,7 +455,8 @@ private slots:
FakeFolder fakeFolder{FileInfo::A12_B12_C12_S12()};
fakeFolder.syncEngine().account()->setCapabilities({ { "dav", QVariantMap{ {"chunking", "1.0"} } } });
-const int size = 150 * 1000 * 1000; // 150 MB
+const int size = 10 * 1000 * 1000; // 10 MB
+setChunkSize(fakeFolder.syncEngine(), 1 * 1000 * 1000);
fakeFolder.localModifier().insert("A/a0", size);
@@ -345,7 +494,8 @@ private slots:
FakeFolder fakeFolder{FileInfo::A12_B12_C12_S12()};
fakeFolder.syncEngine().account()->setCapabilities({ { "dav", QVariantMap{ {"chunking", "1.0"} } } });
-const int size = 300 * 1000 * 1000; // 300 MB
+const int size = 30 * 1000 * 1000; // 30 MB
+setChunkSize(fakeFolder.syncEngine(), 1 * 1000 * 1000);
partialUpload(fakeFolder, "A/a0", size);
QCOMPARE(fakeFolder.uploadState().children.count(), 1);
auto chunkingId = fakeFolder.uploadState().children.first().name;
@@ -375,7 +525,8 @@ private slots:
QFETCH(bool, chunking);
FakeFolder fakeFolder{ FileInfo::A12_B12_C12_S12() };
fakeFolder.syncEngine().account()->setCapabilities({ { "dav", QVariantMap{ { "chunking", "1.0" } } }, { "checksums", QVariantMap{ { "supportedTypes", QStringList() << "SHA1" } } } });
-const int size = chunking ? 150 * 1000 * 1000 : 300;
+const int size = chunking ? 1 * 1000 * 1000 : 300;
+setChunkSize(fakeFolder.syncEngine(), 300 * 1000);
// Make the MOVE never reply, but trigger a client-abort and apply the change remotely
QByteArray checksumHeader;
@@ -400,7 +551,6 @@ private slots:
return nullptr;
});
-// Test 1: a NEW file
fakeFolder.localModifier().insert("A/a0", size);
QVERIFY(!fakeFolder.syncOnce()); // timeout!
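One idiom recurs in every resume test above: totalling the fake server's chunk sizes with std::accumulate to learn how much was already uploaded before the interruption. A self-contained version of that idiom, with std::map and a stripped-down FileInfo standing in for the Qt test-harness types:

    #include <cassert>
    #include <cstdint>
    #include <map>
    #include <numeric>
    #include <string>

    struct FileInfo { int64_t size = 0; }; // stand-in for the harness type

    // Sum the sizes of all chunks the (fake) server currently holds.
    int64_t uploadedSize(const std::map<std::string, FileInfo> &chunkMap)
    {
        return std::accumulate(chunkMap.begin(), chunkMap.end(), int64_t(0),
            [](int64_t s, const std::pair<const std::string, FileInfo> &entry) {
                return s + entry.second.size;
            });
    }

    int main()
    {
        const std::map<std::string, FileInfo> chunks{
            {"00000000", {1000000}},
            {"00000001", {1000000}},
            {"00000002", {500000}},
        };
        assert(uploadedSize(chunks) == 2500000);
        return 0;
    }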


@@ -36,6 +36,7 @@ private slots:
uploadInfo._transferid = 1;
uploadInfo._valid = true;
uploadInfo._modtime = Utility::qDateTimeToTime_t(modTime);
+uploadInfo._size = size;
fakeFolder.syncEngine().journal()->setUploadInfo("A/a0", uploadInfo);
fakeFolder.uploadState().mkdir("1");