remove propagate function from csync_owncloud

Olivier Goffart 2013-10-29 13:39:59 +01:00
parent ae2b2c8cf8
commit f707dc1a8b
3 changed files with 14 additions and 776 deletions


@@ -93,13 +93,6 @@ static void clean_caches() {
char _buffer[PUT_BUFFER_SIZE];
/* Difference in usec between two times */
static int64_t _timediff(struct timeval x , struct timeval y)
{
return (y.tv_sec - x.tv_sec)*1000000 + (y.tv_usec - x.tv_usec);
}
/*
* helper method to build up a user text for SSL problems, called from the
* verify_sslcert callback.
@@ -431,53 +424,6 @@ static int post_send_hook(ne_request *req, void *userdata,
return NE_REDIRECT;
}
/* called from neon */
static void ne_notify_status_cb (void *userdata, ne_session_status status,
const ne_session_status_info *info)
{
struct transfer_context *tc = (struct transfer_context*) userdata;
struct timeval now;
int bandwidth_limit = 0;
if ((status == ne_status_sending || status == ne_status_recving)) {
if (info->sr.total > 0) {
oc_notify_progress( tc->url, CSYNC_NOTIFY_PROGRESS,
chunked_done + info->sr.progress,
chunked_total_size ? chunked_total_size : info->sr.total );
}
if (chunked_total_size && info->sr.total > 0 && info->sr.total == info->sr.progress) {
chunked_done += info->sr.total;
DEBUG_WEBDAV("Chunk upload completed for '%s' (%lld bytes out of %lld)", tc->url , chunked_done, chunked_total_size);
}
}
/* throttle connection */
if (status == ne_status_sending) bandwidth_limit = dav_session.bandwidth_limit_upload;
if (status == ne_status_recving) bandwidth_limit = dav_session.bandwidth_limit_download;
if (bandwidth_limit > 0 && gettimeofday(&now, NULL) == 0) {
int64_t diff = _timediff(tc->last_time, now);
int64_t len = info->sr.progress - tc->last_progress;
if (len > 0 && diff > 0 && (1000000 * len / diff) > (int64_t)bandwidth_limit) {
int64_t wait_time = (1000000 * len / bandwidth_limit) - diff;
if (wait_time > 0) {
usleep(wait_time);
}
}
tc->last_progress = info->sr.progress;
gettimeofday(&tc->last_time, NULL);
} else if (bandwidth_limit < 0 && bandwidth_limit > -100 && gettimeofday(&now, NULL) == 0) {
int64_t diff = _timediff(tc->last_time, now);
if (diff > 0) {
// -bandwidth_limit is the % of bandwidth
int64_t wait_time = -diff * (1 + 100.0 / bandwidth_limit);
if (wait_time > 0) {
usleep(wait_time);
}
}
gettimeofday(&tc->last_time, NULL);
}
}
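/*
 * Illustrative aside, not part of the original file: the throttle arithmetic
 * above, worked through with hypothetical numbers. With
 * dav_session.bandwidth_limit_upload = 100000 (bytes/s), sending len = 50000
 * bytes in diff = 200000 usec means an observed rate of 250000 bytes/s, so the
 * callback sleeps for (1000000 * 50000 / 100000) - 200000 = 300000 usec,
 * stretching the interval to the 500000 usec a 100000 bytes/s link would need.
 * A minimal restatement of that formula (the helper name is made up for this
 * sketch):
 */
static int64_t example_throttle_wait_usec(int64_t len, int64_t diff, int bandwidth_limit)
{
    if (len > 0 && diff > 0 && (1000000 * len / diff) > (int64_t)bandwidth_limit)
        return (1000000 * len / bandwidth_limit) - diff; /* usec still owed to stay under the limit */
    return 0;
}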
// as per http://sourceforge.net/p/predef/wiki/OperatingSystems/
// extend as required
static const char* get_platform() {
@@ -944,78 +890,6 @@ static int owncloud_stat(const char *uri, csync_vio_file_stat_t *buf) {
return 0;
}
static ssize_t owncloud_write(csync_vio_method_handle_t *fhandle, const void *buf, size_t count) {
(void) fhandle;
(void) buf;
(void) count;
return 0;
}
static int content_reader(void *userdata, const char *buf, size_t len)
{
struct transfer_context *writeCtx = userdata;
size_t written = 0;
if( buf && writeCtx->fd ) {
/* DEBUG_WEBDAV("Writing %scompressed %d bytes", (writeCtx->decompress ? "" : "NON "), len); */
written = write(writeCtx->fd, buf, len);
if( len != written ) {
DEBUG_WEBDAV("WRN: content_reader wrote wrong num of bytes: %zu, %zu %d %d", len, written, errno, writeCtx->fd);
}
writeCtx->get_size += len;
return NE_OK;
} else {
errno = EBADF;
}
return NE_ERROR;
}
/*
* This hook is called after the response is here from the server, but before
* the response body is parsed. It decides if the response is compressed and
* if it is it installs the compression reader accordingly.
* If the response is not compressed, the normal response body reader is installed.
*/
static void install_content_reader( ne_request *req, void *userdata, const ne_status *status )
{
const char *enc = NULL;
struct transfer_context *writeCtx = userdata;
(void) status;
if( !writeCtx ) {
DEBUG_WEBDAV("Error: install_content_reader called without valid write context!");
return;
}
enc = ne_get_response_header( req, "Content-Encoding" );
DEBUG_WEBDAV("Content encoding ist <%s> with status %d", enc ? enc : "empty",
status ? status->code : -1 );
if( enc && c_streq( enc, "gzip" )) {
writeCtx->decompress = ne_decompress_reader( req, ne_accept_2xx,
content_reader, /* reader callback */
(void*) writeCtx ); /* userdata */
} else {
ne_add_response_body_reader( req, ne_accept_2xx,
content_reader,
(void*) writeCtx );
writeCtx->decompress = NULL;
}
enc = ne_get_response_header( req, "ETag" );
if (enc && *enc) {
SAFE_FREE(_id_cache.uri);
SAFE_FREE(_id_cache.id);
_id_cache.uri = c_strdup(writeCtx->url);
_id_cache.id = c_strdup(enc);
}
}
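/*
 * Illustrative aside, not part of the original file: install_content_reader is
 * a neon post-headers hook, so it has to be registered per request and removed
 * again afterwards, otherwise it stays chained on the session. The GET branch
 * of owncloud_sendfile below follows that pattern:
 *
 *     ne_hook_post_headers(dav_session.ctx, install_content_reader, write_ctx);
 *     ne_request_dispatch(write_ctx->req);
 *     ne_unhook_post_headers(dav_session.ctx, install_content_reader, write_ctx);
 *     if (write_ctx->decompress)
 *         ne_decompress_destroy(write_ctx->decompress);
 *
 * The decompress handle only exists when the server answered with
 * Content-Encoding: gzip; the plain body reader installed otherwise needs no
 * cleanup of its own.
 */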
static char*_lastDir = NULL;
/* capabilities are currently:
* bool atomar_copy_support - oC provides atomic copy
* bool do_post_copy_stat - oC does not want the post copy check
@@ -1104,412 +978,6 @@ static const char* owncloud_get_etag( const char *path )
return cbuf;
}
static csync_vio_method_handle_t *owncloud_open(const char *durl,
int flags,
mode_t mode) {
char *uri = NULL;
char *dir = NULL;
int put = 0;
int rc = NE_OK;
struct transfer_context *writeCtx = NULL;
csync_vio_file_stat_t statBuf;
ZERO_STRUCT(statBuf);
(void) mode; /* unused on webdav server */
DEBUG_WEBDAV( "=> open called for %s", durl );
uri = _cleanPath( durl );
if( ! uri ) {
DEBUG_WEBDAV("Failed to clean path for %s", durl );
errno = EACCES;
rc = NE_ERROR;
}
if( rc == NE_OK )
dav_connect( durl );
if (flags & O_WRONLY) {
put = 1;
}
if (flags & O_RDWR) {
put = 1;
}
if (flags & O_CREAT) {
put = 1;
}
if( rc == NE_OK && put ) {
/* check if the dir name exists. Otherwise return ENOENT */
dir = c_dirname( durl );
if (dir == NULL) {
errno = ENOMEM;
SAFE_FREE(uri);
return NULL;
}
DEBUG_WEBDAV("Stating directory %s", dir );
if( c_streq( dir, _lastDir )) {
DEBUG_WEBDAV("Dir %s is there, we know it already.", dir);
} else {
if( owncloud_stat( dir, &statBuf ) == 0 ) {
SAFE_FREE(statBuf.name);
SAFE_FREE(statBuf.md5);
DEBUG_WEBDAV("Directory of file to open exists.");
SAFE_FREE( _lastDir );
_lastDir = c_strdup(dir);
} else {
DEBUG_WEBDAV("Directory %s of file to open does NOT exist.", dir );
/* the directory does not exist. That is an ENOENT */
errno = ENOENT;
SAFE_FREE(dir);
SAFE_FREE(uri);
SAFE_FREE(statBuf.name);
return NULL;
}
}
}
writeCtx = c_malloc( sizeof(struct transfer_context) );
writeCtx->url = c_strdup(durl);
writeCtx->req = NULL;
writeCtx->fd = -1;
if( rc == NE_OK && put) {
DEBUG_WEBDAV("PUT request on %s!", uri);
writeCtx->method = "PUT";
}
if( rc == NE_OK && ! put ) {
writeCtx->method = "GET";
DEBUG_WEBDAV("GET request on %s", uri );
}
if( rc != NE_OK ) {
SAFE_FREE( writeCtx );
}
SAFE_FREE( uri );
SAFE_FREE( dir );
return (csync_vio_method_handle_t *) writeCtx;
}
static csync_vio_method_handle_t *owncloud_creat(const char *durl, mode_t mode) {
csync_vio_method_handle_t *handle = owncloud_open(durl, O_CREAT|O_WRONLY|O_TRUNC, mode);
/* on create, the file needs to be created empty */
owncloud_write( handle, NULL, 0 );
return handle;
}
static int _user_want_abort(void *userdata)
{
(void)userdata;
return csync_abort_requested(dav_session.csync_ctx);
}
static void _log_callback(const char *func, const char *text, void *userdata)
{
(void)userdata;
csync_log(CSYNC_LOG_PRIORITY_TRACE, func, "%s", text);
}
static int owncloud_sendfile(csync_vio_method_handle_t *src, csync_vio_method_handle_t *hdl ) {
int rc = 0;
int neon_stat;
const ne_status *status;
struct transfer_context *write_ctx = (struct transfer_context*) hdl;
fhandle_t *fh = (fhandle_t *) src;
int fd;
int error_code = 0;
char *clean_uri = NULL;
int64_t file_size;
if( ! write_ctx ) {
errno = EINVAL;
return -1;
}
if( !fh ) {
errno = EINVAL;
return -1;
}
fd = fh->fd;
clean_uri = _cleanPath( write_ctx->url );
chunked_total_size = 0;
chunked_done = 0;
gettimeofday(&write_ctx->last_time, NULL);
DEBUG_WEBDAV("Sendfile handling request type %s. fd %d", write_ctx->method, fd);
/*
* Copy from the file descriptor if method == PUT
* Copy to the file descriptor if method == GET
*/
if( c_streq( write_ctx->method, "PUT") ) {
bool finished = true;
int attempts = 0;
clear_propfind_recursive_cache();
/*
* do ten tries to upload the file chunked. Check the file size and mtime
* before submitting a chunk and after having submitted the last one.
* If the file has changed, retry.
*/
do {
Hbf_State state = HBF_SUCCESS;
hbf_transfer_t *trans = hbf_init_transfer(clean_uri);
hbf_set_log_callback(trans, _log_callback);
if (dav_session.hbf_block_size > 0) {
trans->threshold = trans->block_size = dav_session.hbf_block_size;
}
if (dav_session.hbf_threshold > 0) {
trans->threshold = dav_session.hbf_threshold;
}
finished = true;
if (!trans) {
DEBUG_WEBDAV("hbf_init_transfer failed");
rc = 1;
} else {
state = hbf_splitlist(trans, fd);
file_size = trans->stat_size;
(void) file_size;
DEBUG_WEBDAV("about to send %d block", trans->block_cnt);
/* Reuse chunk info that was stored in database if existing. */
if (dav_session.chunk_info && dav_session.chunk_info->transfer_id) {
DEBUG_WEBDAV("Existing chunk info %d %d ", dav_session.chunk_info->start_id, dav_session.chunk_info->transfer_id);
trans->start_id = dav_session.chunk_info->start_id;
trans->transfer_id = dav_session.chunk_info->transfer_id;
}
if (state == HBF_SUCCESS) {
ne_set_notifier(dav_session.ctx, ne_notify_status_cb, write_ctx);
oc_notify_progress(write_ctx->url, CSYNC_NOTIFY_START_UPLOAD, 0, 0);
}
/* Register the abort callback */
hbf_set_abort_callback( trans, _user_want_abort );
if( state == HBF_SUCCESS ) {
chunked_total_size = trans->stat_size;
/* Transfer all the chunks through the HTTP session using PUT. */
state = hbf_transfer( dav_session.ctx, trans, "PUT" );
}
/* Handle errors. */
if ( state != HBF_SUCCESS ) {
if( state == HBF_USER_ABORTED ) {
DEBUG_WEBDAV("User Aborted file upload!");
errno = ERRNO_USER_ABORT;
rc = -1;
}
/* If the source file changed during submission, lets try again */
if( state == HBF_SOURCE_FILE_CHANGE ) {
if( attempts++ < 30 ) { /* FIXME: How often do we want to try? */
finished = false; /* make it try again from scratch. */
DEBUG_WEBDAV("SOURCE file has changed during upload, retry #%d in two seconds!", attempts);
sleep(2);
}
}
if( finished ) {
SAFE_FREE(dav_session.error_string);
dav_session.error_string = c_strdup(hbf_error_string(trans, state));
error_code = hbf_fail_http_code(trans);
set_errno_from_http_errcode(error_code);
rc = 1;
if (dav_session.chunk_info) {
dav_session.chunk_info->start_id = trans->start_id;
dav_session.chunk_info->transfer_id = trans->transfer_id;
}
}
}
}
hbf_free_transfer(trans);
} while( !finished );
ne_set_notifier(dav_session.ctx, 0, 0);
if( rc == 0 ) {
DEBUG_WEBDAV(" * Upload finished %s", write_ctx->url);
} else {
oc_notify_progress(write_ctx->url, CSYNC_NOTIFY_ERROR, error_code, (intptr_t)(dav_session.error_string));
}
} else if( c_streq( write_ctx->method, "GET") ) {
/* GET a file to the file descriptor */
/* actually do the request */
int retry = 0;
DEBUG_WEBDAV(" -- GET on %s", write_ctx->url);
write_ctx->fd = fd;
ne_set_notifier(dav_session.ctx, ne_notify_status_cb, write_ctx);
oc_notify_progress(write_ctx->url, CSYNC_NOTIFY_START_DOWNLOAD, 0 , 0);
do {
csync_stat_t sb;
if (write_ctx->req)
ne_request_destroy( write_ctx->req );
if( _user_want_abort(0) ) {
errno = ERRNO_USER_ABORT;
break;
}
write_ctx->req = ne_request_create(dav_session.ctx, "GET", clean_uri);
/* Allow compressed content by setting the header */
ne_add_request_header( write_ctx->req, "Accept-Encoding", "gzip" );
if (_tfstat(fd, &sb) >= 0 && sb.st_size > 0) {
char brange[64];
snprintf(brange, sizeof brange, "bytes=%"PRId64"-", (int64_t) sb.st_size);
ne_add_request_header(write_ctx->req, "Range", brange);
ne_add_request_header(write_ctx->req, "Accept-Ranges", "bytes");
DEBUG_WEBDAV("Retry with range %s fd %d", brange, fd);
}
/* hook called before the content is parsed to set the correct reader,
* either the compressed- or uncompressed reader.
*/
ne_hook_post_headers( dav_session.ctx, install_content_reader, write_ctx );
neon_stat = ne_request_dispatch(write_ctx->req );
/* delete the hook again, otherwise they get chained as they are with the session */
ne_unhook_post_headers( dav_session.ctx, install_content_reader, write_ctx );
/* if the compression handle is set through the post_header hook, delete it. */
if( write_ctx->decompress ) {
ne_decompress_destroy( write_ctx->decompress );
}
/* possible return codes are:
* NE_OK, NE_AUTH, NE_CONNECT, NE_TIMEOUT, NE_ERROR (from ne_request.h)
*/
status = ne_get_status( write_ctx->req );
if( neon_stat != NE_OK ) {
/* If a timeout happened try again for three times */
if (neon_stat == NE_TIMEOUT && (++retry) < 3) {
continue;
}
set_errno_from_neon_errcode(neon_stat);
DEBUG_WEBDAV("Error GET: Neon: %d, errno %d, string %s", neon_stat, errno, dav_session.error_string);
error_code = errno;
if( status != NULL && errno != ERRNO_ERROR_STRING ) {
SAFE_FREE(dav_session.error_string);
dav_session.error_string = c_strdup(status->reason_phrase);
}
rc = 1;
} else {
if( status != NULL ) {
DEBUG_WEBDAV("GET http result %d (%s)", status->code, status->reason_phrase ? status->reason_phrase : "<empty");
if( status->klass != 2 ) {
DEBUG_WEBDAV("sendfile request failed with http status %d!", status->code);
set_errno_from_http_errcode( status->code );
/* decide if soft error or hard error that stops the whole sync. */
/* Currently all problems concerning one file are soft errors */
if( status->klass == 4 /* Forbidden and stuff, soft error */ ) {
rc = 1;
} else if( status->klass == 5 /* Server errors and such */ ) {
rc = 1; /* No Abort on individual file errors. */
} else {
rc = 1;
}
error_code = status->code;
SAFE_FREE(dav_session.error_string);
dav_session.error_string = c_strdup(status->reason_phrase);
} else {
DEBUG_WEBDAV("http request all cool, result code %d", status->code);
}
} else {
/* No status but still a problem */
DEBUG_WEBDAV("GET failed, but no neon status available.");
error_code = 400;
rc = 1;
}
}
break;
} while (1);
ne_set_notifier(dav_session.ctx, 0, 0);
if( rc == 0 ) {
DEBUG_WEBDAV(" ** Finished download %s", write_ctx->url);
} else {
oc_notify_progress( write_ctx->url, CSYNC_NOTIFY_ERROR, error_code , (intptr_t)(dav_session.error_string));
}
} else {
DEBUG_WEBDAV("Unknown method!");
errno = ERRNO_GENERAL_ERROR;
rc = -1;
}
chunked_total_size = 0;
chunked_done = 0;
SAFE_FREE(clean_uri);
return rc;
}
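/*
 * Illustrative aside, not part of the original file: the download-resume logic
 * in the GET branch above reduces to "ask for the tail of the file we already
 * have". A minimal restatement using the same _tfstat/csync_stat_t helpers as
 * the code above, assuming an already created neon request and the temp-file
 * descriptor (the sketch function name is made up):
 */
static void example_request_resume_range(ne_request *req, int fd)
{
    csync_stat_t sb;
    if (_tfstat(fd, &sb) >= 0 && sb.st_size > 0) {
        char brange[64];
        /* e.g. "Range: bytes=123456-" to continue a partial download */
        snprintf(brange, sizeof brange, "bytes=%"PRId64"-", (int64_t) sb.st_size);
        ne_add_request_header(req, "Range", brange);
        ne_add_request_header(req, "Accept-Ranges", "bytes");
    }
}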
static int owncloud_close(csync_vio_method_handle_t *fhandle) {
struct transfer_context *writeCtx;
int ret = 0;
writeCtx = (struct transfer_context*) fhandle;
if (fhandle == NULL) {
DEBUG_WEBDAV("*** Close returns errno EBADF!");
errno = EBADF;
return -1;
}
if (writeCtx->req)
ne_request_destroy( writeCtx->req );
if( ret != -1 && strcmp( writeCtx->method, "PUT" ) == 0 ) {
// Clear the cache so get_id gets the updates
clean_caches();
}
/* free mem. Note that the request mem is freed by the ne_request_destroy call */
SAFE_FREE( writeCtx->url );
SAFE_FREE( writeCtx );
return ret;
}
static ssize_t owncloud_read(csync_vio_method_handle_t *fhandle, void *buf, size_t count) {
size_t len = 0;
(void) fhandle;
(void) buf;
(void) count;
return len;
}
static int64_t owncloud_lseek(csync_vio_method_handle_t *fhandle, int64_t offset, int whence) {
(void) fhandle;
(void) offset;
(void) whence;
return -1;
}
/*
* directory functions
*/
@@ -1598,155 +1066,6 @@ static csync_vio_file_stat_t *owncloud_readdir(csync_vio_method_handle_t *dhandl
return NULL;
}
static int owncloud_mkdir(const char *uri, mode_t mode) {
int rc = NE_OK;
int len = 0;
char *path = _cleanPath( uri );
(void) mode; /* unused */
clear_propfind_recursive_cache();
if( ! path ) {
errno = EINVAL;
rc = -1;
}
rc = dav_connect(uri);
if (rc < 0) {
errno = EINVAL;
}
/* the uri path is required to have a trailing slash */
if( rc >= 0 ) {
len = strlen( path );
if( path[len-1] != '/' ) {
path = c_realloc(path, len+2);
path[len]= '/';
path[len+1] = 0;
}
DEBUG_WEBDAV("MKdir on %s", path );
rc = ne_mkcol(dav_session.ctx, path );
set_errno_from_neon_errcode(rc);
/* Special for mkcol: it returns 405 if the directory already exists.
* To keep csync vio_mkdirs working errno EEXIST has to be returned. */
if (errno == EPERM && http_result_code_from_session() == 405) {
errno = EEXIST;
} else if (rc != NE_OK) {
oc_notify_progress(uri, CSYNC_NOTIFY_ERROR, http_result_code_from_session(),
(intptr_t)(dav_session.error_string) );
}
}
SAFE_FREE( path );
if( rc < 0 || rc != NE_OK ) {
return -1;
}
return 0;
}
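/*
 * Illustrative aside, not part of the original file: the 405 special case
 * above exists because RFC 4918 allows MKCOL to answer "405 Method Not
 * Allowed" when the collection is already mapped, and csync's vio_mkdirs
 * relies on errno == EEXIST to treat "already there" as harmless. A minimal
 * restatement using the same helpers as above (the sketch function name is
 * made up):
 */
static int example_map_mkcol_errno(int neon_rc)
{
    set_errno_from_neon_errcode(neon_rc);
    if (errno == EPERM && http_result_code_from_session() == 405)
        errno = EEXIST; /* collection already exists: callers treat this as non-fatal */
    return neon_rc == NE_OK ? 0 : -1;
}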
static int owncloud_rmdir(const char *uri) {
int rc = NE_OK;
char* curi = _cleanPath( uri );
if( curi == NULL ) {
DEBUG_WEBDAV("Can not clean path for %s, bailing out.", uri ? uri:"<empty>");
return -1;
}
rc = dav_connect(uri);
if (rc < 0) {
errno = EINVAL;
}
if( rc >= 0 ) {
rc = ne_delete(dav_session.ctx, curi);
set_errno_from_neon_errcode( rc );
}
SAFE_FREE( curi );
if( rc < 0 || rc != NE_OK ) {
return -1;
}
return 0;
}
static int owncloud_rename(const char *olduri, const char *newuri) {
char *src = NULL;
char *target = NULL;
int rc = NE_OK;
clear_propfind_recursive_cache();
rc = dav_connect(olduri);
if (rc < 0) {
errno = EINVAL;
}
src = _cleanPath( olduri );
target = _cleanPath( newuri );
if( rc >= 0 ) {
DEBUG_WEBDAV("MOVE: %s => %s: %d", src, target, rc );
rc = ne_move(dav_session.ctx, 1, src, target );
if (rc == NE_ERROR && http_result_code_from_session() == 409) {
/* destination folder might not exist */
errno = ENOENT;
} else {
set_errno_from_neon_errcode(rc);
if (rc != NE_OK) {
oc_notify_progress(olduri, CSYNC_NOTIFY_ERROR, http_result_code_from_session(),
(intptr_t)(dav_session.error_string) );
}
}
}
SAFE_FREE( src );
SAFE_FREE( target );
if( rc != NE_OK )
return 1;
return 0;
}
static int owncloud_unlink(const char *uri) {
int rc = NE_OK;
char *path = _cleanPath( uri );
clear_propfind_recursive_cache();
if( ! path ) {
rc = NE_ERROR;
errno = EINVAL;
}
if( rc == NE_OK ) {
rc = dav_connect(uri);
if (rc < 0) {
errno = EINVAL;
}
}
if( rc == NE_OK ) {
rc = ne_delete( dav_session.ctx, path );
set_errno_from_neon_errcode(rc);
}
SAFE_FREE( path );
return 0;
}
static int owncloud_chmod(const char *uri, mode_t mode) {
(void) uri;
(void) mode;
return 0;
}
static int owncloud_chown(const char *uri, uid_t owner, gid_t group) {
(void) uri;
(void) owner;
(void) group;
return 0;
}
static char *owncloud_error_string()
{
return dav_session.error_string;
@@ -1754,8 +1073,6 @@ static char *owncloud_error_string()
static int owncloud_commit() {
SAFE_FREE( _lastDir );
clean_caches();
if( dav_session.ctx )
@@ -1770,56 +1087,6 @@ static int owncloud_commit() {
return 0;
}
static int owncloud_utimes(const char *uri, const struct timeval *times) {
ne_proppatch_operation ops[2];
ne_propname pname;
int rc = NE_OK;
char val[255];
char *curi = NULL;
const struct timeval *modtime = times+1;
long newmodtime;
curi = _cleanPath( uri );
if( ! uri ) {
errno = ENOENT;
return -1;
}
if( !times ) {
errno = EACCES;
return -1; /* FIXME: Find good errno */
}
pname.nspace = "DAV:";
pname.name = "lastmodified";
newmodtime = modtime->tv_sec;
snprintf( val, sizeof(val), "%ld", newmodtime );
DEBUG_WEBDAV("Setting LastModified of %s to %s", curi, val );
ops[0].name = &pname;
ops[0].type = ne_propset;
ops[0].value = val;
ops[1].name = NULL;
rc = ne_proppatch( dav_session.ctx, curi, ops );
SAFE_FREE(curi);
if( rc != NE_OK ) {
const char *err = ne_get_error(dav_session.ctx);
set_errno_from_neon_errcode(rc);
DEBUG_WEBDAV("Error in propatch: %s", err == NULL ? "<empty err msg.>" : err);
return -1;
}
clean_caches();
return 0;
}
static int owncloud_set_property(const char *key, void *data) {
#define READ_STRING_PROPERTY(P) \
if (c_streq(key, #P)) { \
@@ -1895,24 +1162,24 @@ csync_vio_method_t _method = {
.method_table_size = sizeof(csync_vio_method_t),
.get_capabilities = owncloud_capabilities,
.get_etag = owncloud_get_etag,
.open = owncloud_open,
.creat = owncloud_creat,
.close = owncloud_close,
.read = owncloud_read,
.write = owncloud_write,
.sendfile = owncloud_sendfile,
.lseek = owncloud_lseek,
.open = 0,
.creat = 0,
.close = 0,
.read = 0,
.write = 0,
.sendfile = 0,
.lseek = 0,
.opendir = owncloud_opendir,
.closedir = owncloud_closedir,
.readdir = owncloud_readdir,
.mkdir = owncloud_mkdir,
.rmdir = owncloud_rmdir,
.mkdir = 0,
.rmdir = 0,
.stat = owncloud_stat,
.rename = owncloud_rename,
.unlink = owncloud_unlink,
.chmod = owncloud_chmod,
.chown = owncloud_chown,
.utimes = owncloud_utimes,
.rename = 0,
.unlink = 0,
.chmod = 0,
.chown = 0,
.utimes = 0,
.set_property = owncloud_set_property,
.get_error_string = owncloud_error_string,
.commit = owncloud_commit


@@ -116,25 +116,6 @@ void fill_recursive_propfind_cache(const char *uri, const char *curi);
struct listdir_context *get_listdir_context_from_cache(const char *curi);
void fetch_resource_list_recursive(const char *uri, const char *curi);
/*
* context to store info about a temp file for GET and PUT requests
* which store the data in a local file to save memory and secure the
* transmission.
*/
struct transfer_context {
ne_request *req; /* the neon request */
int fd; /* file descriptor of the file to read or write from */
const char *method; /* the HTTP method, either PUT or GET */
ne_decompress *decompress; /* the decompress context */
char *url;
/* Used for limiting the bandwidth */
struct timeval last_time;
ne_off_t last_progress;
int64_t get_size;
};
typedef int (*csync_owncloud_redirect_callback_t)(CSYNC* ctx, const char* uri);
/* Struct with the WebDAV session */


@@ -200,11 +200,6 @@ int csync_vio_init(CSYNC *ctx, const char *module, const char *args) {
return -1;
}
if (! VIO_METHOD_HAS_FUNC(m, open)) {
CSYNC_LOG(CSYNC_LOG_PRIORITY_ERROR, "module %s has no open fn", module);
return -1;
}
if (! VIO_METHOD_HAS_FUNC(m, opendir)) {
CSYNC_LOG(CSYNC_LOG_PRIORITY_ERROR, "module %s has no opendir fn", module);
return -1;
@@ -222,11 +217,6 @@ int csync_vio_init(CSYNC *ctx, const char *module, const char *args) {
CSYNC_LOG(CSYNC_LOG_PRIORITY_WARN, "module %s has no capabilities fn", module);
}
if (! VIO_METHOD_HAS_FUNC(m, open)) {
CSYNC_LOG(CSYNC_LOG_PRIORITY_ERROR, "module %s has no stat fn", module);
return -1;
}
if (! VIO_METHOD_HAS_FUNC(m, get_etag)) {
CSYNC_LOG(CSYNC_LOG_PRIORITY_WARN, "module %s has no get_etag fn", module);
}