Upload: Store the size in the UploadInfo, and compare it when resolving a potential conflict

This is about the conflict that happens when the file has been uploaded
correctly to the server, but the etag was not received because the connection
was closed before we got the reply.

We used to compare only the mtime when checking whether the uploaded file and
the existing local file are the same. However, to be fully correct, we should
also compare the size.
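
As a minimal standalone sketch of that check (illustrative only: the real code
uses csync_file_stat_t, SyncJournalDb::UploadInfo and the CSYNC_INSTRUCTION_*
constants, as in the diff below; the names here are simplified):

#include <cstdint>

// Illustrative sketch; not the actual csync types.
enum class Instruction { UpdateMetadata, Sync };

struct StoredUploadInfo {
    int64_t modtime = 0;
    int64_t size = 0;
};

struct LocalFile {
    int64_t modtime = 0;
    int64_t size = 0;
};

// Decide what to do once discovery shows that the interrupted upload did
// reach the server (only the etag reply was lost, not the upload itself).
Instruction resolvePotentialConflict(const StoredUploadInfo &up, const LocalFile &local)
{
    // Comparing only the mtime is not enough: the file may have been
    // modified again within the same second, so the size must match too
    // before concluding the server already has the current local content.
    if (up.modtime == local.modtime && up.size == local.size)
        return Instruction::UpdateMetadata; // uploaded content == local content
    return Instruction::Sync;               // local file changed again: re-upload
}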

This was found because TestChunkingNG::connectionDroppedBeforeEtagRecieved is
flaky. An example of the failure, found at
https://drone.owncloud.com/owncloud/client/481/5 while testing PR #6626: the
local file grows from 301 to 302 bytes between the two syncs while keeping the
same mtime, so the mtime-only comparison concludes that the interrupted upload
already matches the current local file, and the engine merely updates the
metadata instead of re-uploading.

(heavily trimmed log:)

06-29 07:58:02:015 [ info sync.csync.csync ]:	## Starting local discovery ##
06-29 07:58:02:016 [ info sync.csync.updater ]:	Database entry found, compare: 1530259082 <-> 1530259051, etag:  <-> 1644a8c8750, inode: 1935629 <-> 1935629, size: 301 <-> 300, perms: 0 <-> ff, type: 0 <-> 0, checksum:  <-> SHA1:cc9adedebe27a6259efb8d6ed09f4f2eff559ad1, ignore: 0
06-29 07:58:02:016 [ info sync.csync.updater ]:	file: A/a0, instruction: INSTRUCTION_EVAL <<=
06-29 07:58:02:972 [ warning sync.networkjob ]:	QNetworkReply::NetworkError(OperationCanceledError) "Connection timed out" QVariant(Invalid)
.. next sync...
06-29 07:58:02:980 [ info sync.engine ]:	#### Discovery start ####################################################
06-29 07:58:02:981 [ info sync.csync.csync ]:	## Starting local discovery ##
06-29 07:58:02:983 [ info sync.csync.updater ]:	Database entry found, compare: 1530259082 <-> 1530259051, etag:  <-> 1644a8c8750, inode: 1935629 <-> 1935629, size: 302 <-> 300, perms: 0 <-> ff, type: 0 <-> 0, checksum:  <-> SHA1:cc9adedebe27a6259efb8d6ed09f4f2eff559ad1, ignore: 0
06-29 07:58:02:983 [ info sync.csync.updater ]:	file: A/a0, instruction: INSTRUCTION_EVAL <<=
06-29 07:58:02:985 [ info sync.csync.csync ]:	## Starting remote discovery ##
06-29 07:58:02:985 [ info sync.networkjob ]:	OCC::LsColJob created for "http://localhost/owncloud" + "" "OCC::DiscoverySingleDirectoryJob"
06-29 07:58:02:987 [ info sync.csync.updater ]:	Database entry found, compare: 1530259082 <-> 1530259051, etag: 1644a8c8b26 <-> 1644a8c8750, inode: 0 <-> 1935629, size: 301 <-> 300, perms: ff <-> ff, type: 0 <-> 0, checksum: SHA1:5adcdac9608ae0811247f07f4cf1ab0a2ef99154 <-> SHA1:cc9adedebe27a6259efb8d6ed09f4f2eff559ad1, ignore: 0
06-29 07:58:02:987 [ info sync.csync.updater ]:	file: A/a0, instruction: INSTRUCTION_EVAL <<=
06-29 07:58:02:989 [ info sync.csync.csync ]:	Update detection for remote replica took 0.004 seconds walking 13 files
06-29 07:58:02:990 [ info sync.engine ]:	#### Discovery end ####################################################  9 ms
06-29 07:58:02:990 [ info sync.database ]:	Updating file record for path: "A/a0" inode: 1935629 modtime: 1530259082 type: 0 etag: "1644a8c8b26" fileId: "16383ea4" remotePerm: "WDNVCKR" fileSize: 301 checksum: "SHA1:cc9adedebe27a6259efb8d6ed09f4f2eff559ad1"
06-29 07:58:02:990 [ info sync.csync.reconciler ]:	INSTRUCTION_UPDATE_METADATA    client file: A/a0
06-29 07:58:02:990 [ info sync.csync.csync ]:	Reconciliation for local replica took  0 seconds visiting  13  files.
06-29 07:58:02:990 [ info sync.csync.reconciler ]:	INSTRUCTION_UPDATE_METADATA    server dir:  A
06-29 07:58:02:990 [ info sync.csync.csync ]:	Reconciliation for remote replica took  0 seconds visiting  13  files.
06-29 07:58:02:990 [ info sync.engine ]:	#### Reconcile end ####################################################  9 ms
06-29 07:58:02:990 [ info sync.database ]:	Updating local metadata for: "A/a0" 1530259082 302 1935629
FAIL!  : TestChunkingNG::connectionDroppedBeforeEtagRecieved(small file) '!fakeFolder.syncOnce()' returned FALSE. ()
Author: Olivier Goffart, 2018-06-29 10:43:01 +02:00 (committed by Kevin Ottens)
Parent: c2324327ef
Commit: a7847a4e82
4 changed files with 10 additions and 3 deletions

@@ -343,7 +343,9 @@ static void _csync_merge_algorithm_visitor(csync_file_stat_t *cur, CSYNC * ctx)
     auto remoteNode = ctx->current == REMOTE_REPLICA ? cur : other;
     auto localNode = ctx->current == REMOTE_REPLICA ? other : cur;
     remoteNode->instruction = CSYNC_INSTRUCTION_NONE;
-    localNode->instruction = up._modtime == localNode->modtime ? CSYNC_INSTRUCTION_UPDATE_METADATA : CSYNC_INSTRUCTION_SYNC;
+    localNode->instruction = up._modtime == localNode->modtime && up._size == localNode->size ?
+        CSYNC_INSTRUCTION_UPDATE_METADATA : CSYNC_INSTRUCTION_SYNC;
     // Update the etag and other server metadata in the journal already
     // (We can't use a typical CSYNC_INSTRUCTION_UPDATE_METADATA because
     // we must not store the size/modtime from the file system)

@@ -83,7 +83,8 @@ void PropagateUploadFileNG::doStartUpload()
     propagator()->_activeJobList.append(this);
     const SyncJournalDb::UploadInfo progressInfo = propagator()->_journal->getUploadInfo(_item->_file);
-    if (progressInfo._valid && progressInfo.isChunked() && progressInfo._modtime == _item->_modtime) {
+    if (progressInfo._valid && progressInfo.isChunked() && progressInfo._modtime == _item->_modtime
+        && progressInfo._size == qint64(_item->_size)) {
         _transferId = progressInfo._transferid;
         auto url = chunkUrl();
         auto job = new LsColJob(propagator()->account(), url, this);

@@ -237,6 +238,7 @@ void PropagateUploadFileNG::startNewUpload()
     pi._transferid = _transferId;
     pi._modtime = _item->_modtime;
     pi._contentChecksum = _item->_checksumHeader;
+    pi._size = _item->_size;
     propagator()->_journal->setUploadInfo(_item->_file, pi);
     propagator()->_journal->commit("Upload info");
     QMap<QByteArray, QByteArray> headers;

@@ -43,7 +43,7 @@ void PropagateUploadFileV1::doStartUpload()
     const SyncJournalDb::UploadInfo progressInfo = propagator()->_journal->getUploadInfo(_item->_file);
-    if (progressInfo._valid && progressInfo.isChunked() && progressInfo._modtime == _item->_modtime
+    if (progressInfo._valid && progressInfo.isChunked() && progressInfo._modtime == _item->_modtime && progressInfo._size == qint64(_item->_size)
         && (progressInfo._contentChecksum == _item->_checksumHeader || progressInfo._contentChecksum.isEmpty() || _item->_checksumHeader.isEmpty())) {
         _startChunk = progressInfo._chunk;
         _transferId = progressInfo._transferid;

@@ -59,6 +59,7 @@ void PropagateUploadFileV1::doStartUpload()
         pi._modtime = _item->_modtime;
         pi._errorCount = 0;
         pi._contentChecksum = _item->_checksumHeader;
+        pi._size = _item->_size;
         propagator()->_journal->setUploadInfo(_item->_file, pi);
         propagator()->_journal->commit("Upload info");
     }

@@ -286,6 +287,7 @@ void PropagateUploadFileV1::slotPutFinished()
     pi._modtime = _item->_modtime;
     pi._errorCount = 0; // successful chunk upload resets
     pi._contentChecksum = _item->_checksumHeader;
+    pi._size = _item->_size;
     propagator()->_journal->setUploadInfo(_item->_file, pi);
     propagator()->_journal->commit("Upload info");
     startNextChunk();

@@ -36,6 +36,7 @@ private slots:
     uploadInfo._transferid = 1;
     uploadInfo._valid = true;
     uploadInfo._modtime = Utility::qDateTimeToTime_t(modTime);
+    uploadInfo._size = size;
     fakeFolder.syncEngine().journal()->setUploadInfo("A/a0", uploadInfo);
     fakeFolder.uploadState().mkdir("1");