Remove legacy propagator and neon

The code was already unneeded/unbuilt on Windows and OS X.
Markus Goetz 2015-10-20 16:58:32 +02:00
parent 98b966d274
commit c8590c4468
45 changed files with 30 additions and 3941 deletions

View file

@@ -133,13 +133,9 @@ endif()
#endif()
set(USE_NEON TRUE)
if(HAVE_QT5)
message(STATUS "Using Qt ${Qt5Core_VERSION_MAJOR}.${Qt5Core_VERSION_MINOR}.x")
if (${Qt5Core_VERSION_MAJOR} EQUAL "5")
if (${Qt5Core_VERSION_MINOR} EQUAL "4" OR ${Qt5Core_VERSION_MINOR} GREATER 4)
message(STATUS "We do not require Neon in this setup, compile without!")
set(USE_NEON FALSE)
else()
message(STATUS "If possible compile me with Qt 5.4 or higher.")
endif()
@@ -148,9 +144,6 @@ else()
message(STATUS "If possible compile me with Qt 5.4 or higher.")
endif()
if (USE_NEON)
find_package(Neon REQUIRED)
endif(USE_NEON)
find_package(OpenSSL 1.0.0 REQUIRED)
if(NOT TOKEN_AUTH_ONLY)

View file

@@ -32,7 +32,3 @@ SET(QT_MOC_EXECUTABLE ${MINGW_PREFIX}-moc)
SET(QT_RCC_EXECUTABLE ${MINGW_PREFIX}-rcc)
SET(QT_UIC_EXECUTABLE ${MINGW_PREFIX}-uic)
SET(QT_LRELEASE_EXECUTABLE ${MINGW_PREFIX}-lrelease)
# neon config
SET(NEON_CONFIG_EXECUTABLE ${CMAKE_FIND_ROOT_PATH}/bin/neon-config)
# /usr/i686-w64-mingw32/sys-root/mingw/bin/neon-config

View file

@@ -1,73 +0,0 @@
# - Try to find Neon
# Once done this will define
#
# NEON_FOUND - system has Neon
# NEON_INCLUDE_DIRS - the Neon include directory
# NEON_LIBRARIES - Link these to use Neon
# NEON_DEFINITIONS - Compiler switches required for using Neon
#
# Copyright (c) 2011-2013 Andreas Schneider <asn@cryptomilk.org>
#
# Redistribution and use is allowed according to the terms of the New
# BSD license.
# For details see the accompanying COPYING-CMAKE-SCRIPTS file.
#
find_package(PkgConfig)
if (PKG_CONFIG_FOUND)
pkg_check_modules(_NEON neon)
endif (PKG_CONFIG_FOUND)
include(GNUInstallDirs)
find_path(NEON_INCLUDE_DIRS
NAMES
neon/ne_basic.h
HINTS
${_NEON_INCLUDEDIR}
${CMAKE_INSTALL_INCLUDEDIR}
)
find_library(NEON_LIBRARIES
NAMES
neon neon-27
HINTS
${_NEON_LIBDIR}
${CMAKE_INSTALL_LIBDIR}
${CMAKE_INSTALL_PREFIX}/lib
${CMAKE_INSTALL_PREFIX}/lib64
)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Neon DEFAULT_MSG NEON_LIBRARIES NEON_INCLUDE_DIRS)
# show the NEON_INCLUDE_DIRS and NEON_LIBRARIES variables only in the advanced view
mark_as_advanced(NEON_INCLUDE_DIRS NEON_LIBRARIES)
# Check if neon was compiled with LFS support, if so, the NE_LFS variable has to
# be defined in the owncloud module.
# If neon was not compiled with LFS its also ok since the underlying system
# than probably supports large files anyway.
IF( CMAKE_FIND_ROOT_PATH )
FIND_PROGRAM( NEON_CONFIG_EXECUTABLE NAMES neon-config HINTS ${CMAKE_FIND_ROOT_PATH}/bin )
ELSE( CMAKE_FIND_ROOT_PATH )
FIND_PROGRAM( NEON_CONFIG_EXECUTABLE NAMES neon-config )
ENDIF( CMAKE_FIND_ROOT_PATH )
IF ( NEON_CONFIG_EXECUTABLE )
MESSAGE(STATUS "neon-config executable: ${NEON_CONFIG_EXECUTABLE}")
# neon-config --support lfs
EXECUTE_PROCESS( COMMAND ${NEON_CONFIG_EXECUTABLE} "--support" "lfs"
RESULT_VARIABLE LFS
OUTPUT_STRIP_TRAILING_WHITESPACE )
IF (LFS EQUAL 0)
MESSAGE(STATUS "libneon has been compiled with LFS support")
SET(NEON_WITH_LFS 1)
ELSE (LFS EQUAL 0)
MESSAGE(STATUS "libneon has not been compiled with LFS support, rely on OS")
ENDIF (LFS EQUAL 0)
ELSE ( NEON_CONFIG_EXECUTABLE )
MESSAGE(STATUS, "neon-config could not be found.")
ENDIF ( NEON_CONFIG_EXECUTABLE )

View file

@@ -31,7 +31,9 @@ if( Qt5Core_FOUND )
else( Qt5Core_FOUND )
if(WIN32 OR APPLE)
if (NOT BUILD_WITH_QT4)
message(FATAL_ERROR "Qt 5 not found, but application depends on Qt5 on Windows and Mac OS X")
endif ()
endif(WIN32 OR APPLE)
endif( Qt5Core_FOUND )

View file

@@ -23,8 +23,4 @@
#cmakedefine SYSCONFDIR "@SYSCONFDIR@"
#cmakedefine SHAREDIR "@SHAREDIR@"
#ifndef NEON_WITH_LFS
#cmakedefine NEON_WITH_LFS "@NEON_WITH_LFS@"
#endif
#endif

View file

@@ -24,10 +24,6 @@
#cmakedefine HAVE_ICONV 1
#cmakedefine HAVE_ICONV_CONST 1
#ifndef NEON_WITH_LFS
#cmakedefine NEON_WITH_LFS 1
#endif
#cmakedefine HAVE___MINGW_ASPRINTF 1
#cmakedefine HAVE_ASPRINTF 1

View file

@@ -1,9 +1,6 @@
project(libcsync)
add_subdirectory(std)
if(USE_NEON)
add_subdirectory(httpbf)
endif()
# Statically include sqlite
@@ -71,19 +68,6 @@ else()
endif()
if(USE_NEON)
list(APPEND csync_SRCS
csync_owncloud.c
csync_owncloud_util.c
)
list(APPEND CSYNC_LINK_LIBRARIES
${NEON_LIBRARIES}
)
include_directories(${NEON_INCLUDE_DIRS})
add_definitions(-DUSE_NEON)
endif(USE_NEON)
configure_file(csync_version.h.in ${CMAKE_CURRENT_BINARY_DIR}/csync_version.h)
set(csync_HDRS

View file

@@ -58,12 +58,6 @@
#include "csync_rename.h"
#include "c_jhash.h"
#ifdef USE_NEON
// Breaking the abstraction for fun and profit.
#include "csync_owncloud.h"
#endif
static int _key_cmp(const void *key, const void *data) {
uint64_t a;
csync_file_stat_t *b;
@@ -154,9 +148,6 @@ int csync_init(CSYNC *ctx) {
ctx->local.type = LOCAL_REPLICA;
#ifdef USE_NEON
owncloud_init(ctx);
#endif
ctx->remote.type = REMOTE_REPLICA;
if (c_rbtree_create(&ctx->local.tree, _key_cmp, _data_cmp) < 0) {
@@ -216,14 +207,6 @@ int csync_update(CSYNC *ctx) {
CSYNC_LOG(CSYNC_LOG_PRIORITY_DEBUG, "No exclude file loaded or defined!");
}
#ifdef USE_NEON
/* This is not actually connecting, just setting the info for neon. The legacy propagator can use it.. */
if (dav_connect( ctx, ctx->remote.uri ) < 0) {
ctx->status_code = CSYNC_STATUS_CONNECT_ERROR;
return -1;
}
#endif
/* update detection for local replica */
csync_gettime(&start);
ctx->current = LOCAL_REPLICA;
@@ -646,10 +629,6 @@ int csync_destroy(CSYNC *ctx) {
SAFE_FREE(ctx->remote.uri);
SAFE_FREE(ctx->error_string);
#ifdef USE_NEON
owncloud_destroy(ctx);
#endif
#ifdef WITH_ICONV
c_close_iconv();
#endif
@@ -672,21 +651,6 @@ void csync_clear_exclude_list(CSYNC *ctx)
csync_exclude_clear(ctx);
}
int csync_set_auth_callback(CSYNC *ctx, csync_auth_callback cb) {
if (ctx == NULL || cb == NULL) {
return -1;
}
if (ctx->status & CSYNC_STATUS_INIT) {
ctx->status_code = CSYNC_STATUS_CSYNC_STATUS_ERROR;
fprintf(stderr, "This function must be called before initialization.");
return -1;
}
ctx->callbacks.auth_function = cb;
return 0;
}
void *csync_get_userdata(CSYNC *ctx) {
if (ctx == NULL) {
return NULL;
@@ -781,14 +745,3 @@ void csync_file_stat_free(csync_file_stat_t *st)
SAFE_FREE(st);
}
}
int csync_set_module_property(CSYNC* ctx, const char* key, void* value)
{
#ifdef USE_NEON
return owncloud_set_property(ctx, key, value);
#else
(void)ctx, (void)key, (void)value;
return 0;
#endif
}

View file

@@ -519,19 +519,6 @@ const char *csync_get_status_string(CSYNC *ctx);
int csync_set_iconv_codec(const char *from);
#endif
/**
* @brief Set a property to module
*
* @param ctx The csync context.
*
* @param key The property key
*
* @param value An opaque pointer to the data.
*
* @return 0 on success, less than 0 if an error occured.
*/
int csync_set_module_property(CSYNC *ctx, const char *key, void *value);
/**
* @brief Aborts the current sync run as soon as possible. Can be called from another thread.
*

View file

@@ -1,617 +0,0 @@
/*
* libcsync -- a library to sync a directory with another
*
* Copyright (c) 2011 by Andreas Schneider <asn@cryptomilk.org>
* Copyright (c) 2012 by Klaas Freitag <freitag@owncloud.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "csync_owncloud.h"
#include "csync_owncloud_private.h"
#include <inttypes.h>
#include "csync_private.h"
#include "csync_version.h"
/*
* helper method to build up a user text for SSL problems, called from the
* verify_sslcert callback.
*/
static void addSSLWarning( char *ptr, const char *warn, int len )
{
char *concatHere = ptr;
int remainingLen = 0;
if( ! (warn && ptr )) return;
remainingLen = len - strlen(ptr);
if( remainingLen <= 0 ) return;
concatHere = ptr + strlen(ptr); /* put the write pointer to the end. */
strncpy( concatHere, warn, remainingLen );
}
/*
* Callback to verify the SSL certificate, called from libneon.
* It analyzes the SSL problem, creates a user information text and passes
* it to the csync callback to ask the user.
*/
#define LEN 4096
static int ssl_callback_by_neon(void *userdata, int failures,
const ne_ssl_certificate *certificate)
{
char problem[LEN];
char buf[MAX(NE_SSL_DIGESTLEN, NE_ABUFSIZ)];
int ret = -1;
const ne_ssl_certificate *cert = certificate;
csync_auth_callback authcb = NULL;
csync_owncloud_ctx_t *ctx = (csync_owncloud_ctx_t*) userdata;
memset( problem, 0, LEN );
while( cert ) {
addSSLWarning( problem, "There are problems with the SSL certificate:\n", LEN );
if( failures & NE_SSL_NOTYETVALID ) {
addSSLWarning( problem, " * The certificate is not yet valid.\n", LEN );
}
if( failures & NE_SSL_EXPIRED ) {
addSSLWarning( problem, " * The certificate has expired.\n", LEN );
}
if( failures & NE_SSL_UNTRUSTED ) {
addSSLWarning( problem, " * The certificate is not trusted!\n", LEN );
}
if( failures & NE_SSL_IDMISMATCH ) {
addSSLWarning( problem, " * The hostname for which the certificate was "
"issued does not match the hostname of the server\n", LEN );
}
if( failures & NE_SSL_BADCHAIN ) {
addSSLWarning( problem, " * The certificate chain contained a certificate other than the server cert\n", LEN );
}
if( failures & NE_SSL_REVOKED ) {
addSSLWarning( problem, " * The server certificate has been revoked by the issuing authority.\n", LEN );
}
if (ne_ssl_cert_digest(cert, buf) == 0) {
addSSLWarning( problem, "Certificate fingerprint: ", LEN );
addSSLWarning( problem, buf, LEN );
addSSLWarning( problem, "\n", LEN );
}
cert = ne_ssl_cert_signedby( cert );
}
addSSLWarning( problem, "Do you want to accept the certificate chain anyway?\nAnswer yes to do so and take the risk: ", LEN );
if( ctx->csync_ctx ) {
authcb = csync_get_auth_callback( ctx->csync_ctx );
}
if( authcb ){
/* call the csync callback */
DEBUG_WEBDAV("Call the csync callback for SSL problems");
memset( buf, 0, NE_ABUFSIZ );
(*authcb) ( problem, buf, NE_ABUFSIZ-1, 1, 0, csync_get_userdata(ctx->csync_ctx) );
if( buf[0] == 'y' || buf[0] == 'Y') {
ret = 0;
} else {
DEBUG_WEBDAV("Authentication callback replied %s", buf );
}
}
DEBUG_WEBDAV("## VERIFY_SSL CERT: %d", ret );
return ret;
}
/*
* Authentication callback. Is set by ne_set_server_auth to be called
* from the neon lib to authenticate a request.
*/
static int authentication_callback_by_neon( void *userdata, const char *realm, int attempt,
char *username, char *password)
{
char buf[NE_ABUFSIZ];
csync_auth_callback authcb = NULL;
int re = attempt;
csync_owncloud_ctx_t *ctx = (csync_owncloud_ctx_t*) userdata;
(void) realm;
/* DEBUG_WEBDAV( "Authentication required %s", realm ); */
if( username && password ) {
DEBUG_WEBDAV( "Authentication required %s", username );
if( ctx->dav_session.user ) {
/* allow user without password */
if( strlen( ctx->dav_session.user ) < NE_ABUFSIZ ) {
strcpy( username, ctx->dav_session.user );
}
if( ctx->dav_session.pwd && strlen( ctx->dav_session.pwd ) < NE_ABUFSIZ ) {
strcpy( password, ctx->dav_session.pwd );
}
} else {
authcb = csync_get_auth_callback( ctx->csync_ctx );
if( authcb != NULL ){
/* call the csync callback */
DEBUG_WEBDAV("Call the csync callback for %s", realm );
memset( buf, 0, NE_ABUFSIZ );
(*authcb) ("Enter your username: ", buf, NE_ABUFSIZ-1, 1, 0, csync_get_userdata(ctx->csync_ctx) );
if( strlen(buf) < NE_ABUFSIZ ) {
strcpy( username, buf );
}
memset( buf, 0, NE_ABUFSIZ );
(*authcb) ("Enter your password: ", buf, NE_ABUFSIZ-1, 0, 0, csync_get_userdata(ctx->csync_ctx) );
if( strlen(buf) < NE_ABUFSIZ) {
strcpy( password, buf );
}
} else {
re = 1;
}
}
}
return re;
}
/*
* Authentication callback. Is set by ne_set_proxy_auth to be called
* from the neon lib to authenticate against a proxy. The data to authenticate
* against comes from mirall throught vio_module_init function.
*/
static int proxy_authentication_callback_by_neon( void *userdata, const char *realm, int attempt,
char *username, char *password)
{
csync_owncloud_ctx_t *ctx = (csync_owncloud_ctx_t*) userdata;
(void) realm;
if( ctx->dav_session.proxy_user && strlen( ctx->dav_session.proxy_user ) < NE_ABUFSIZ) {
strcpy( username, ctx->dav_session.proxy_user );
if( ctx->dav_session.proxy_pwd && strlen( ctx->dav_session.proxy_pwd ) < NE_ABUFSIZ) {
strcpy( password, ctx->dav_session.proxy_pwd );
}
}
/* NTLM needs several attempts */
return (attempt < 3) ? 0 : -1;
}
/* Configure the proxy depending on the variables */
static int configureProxy( csync_owncloud_ctx_t *ctx, ne_session *session )
{
int port = 8080;
int re = -1;
if( ! session ) return -1;
if( ! ctx->dav_session.proxy_type ) return 0; /* Go by NoProxy per default */
if( ctx->dav_session.proxy_port > 0 ) {
port = ctx->dav_session.proxy_port;
}
if( c_streq(ctx->dav_session.proxy_type, "NoProxy" )) {
DEBUG_WEBDAV("No proxy configured.");
re = 0;
} else if( c_streq(ctx->dav_session.proxy_type, "DefaultProxy") ||
c_streq(ctx->dav_session.proxy_type, "HttpProxy") ||
c_streq(ctx->dav_session.proxy_type, "HttpCachingProxy") ||
c_streq(ctx->dav_session.proxy_type, "Socks5Proxy")) {
if( ctx->dav_session.proxy_host ) {
DEBUG_WEBDAV("%s at %s:%d", ctx->dav_session.proxy_type, ctx->dav_session.proxy_host, port );
if (c_streq(ctx->dav_session.proxy_type, "Socks5Proxy")) {
ne_session_socks_proxy(session, NE_SOCK_SOCKSV5, ctx->dav_session.proxy_host, port,
ctx->dav_session.proxy_user, ctx->dav_session.proxy_pwd);
} else {
ne_session_proxy(session, ctx->dav_session.proxy_host, port );
}
re = 2;
} else {
DEBUG_WEBDAV("%s requested but no proxy host defined.", ctx->dav_session.proxy_type );
/* we used to try ne_system_session_proxy here, but we should rather err out
to behave exactly like the caller. */
}
} else {
DEBUG_WEBDAV( "Unsupported Proxy: %s", ctx->dav_session.proxy_type );
}
return re;
}
/*
* This hook is called for with the response of a request. Here its checked
* if a Set-Cookie header is there for the PHPSESSID. The key is stored into
* the webdav session to be added to subsequent requests.
*/
static void post_request_hook(ne_request *req, void *userdata, const ne_status *status)
{
const char *set_cookie_header = NULL;
const char *sc = NULL;
char *key = NULL;
csync_owncloud_ctx_t *ctx = (csync_owncloud_ctx_t*) userdata;
if (ctx->dav_session.session_key)
return; /* We already have a session cookie, and we should ignore other ones */
if(!(status && req)) return;
if( status->klass == 2 || status->code == 401 ) {
/* successful request */
set_cookie_header = ne_get_response_header( req, "Set-Cookie" );
if( set_cookie_header ) {
DEBUG_WEBDAV(" Set-Cookie found: %s", set_cookie_header);
/* try to find a ', ' sequence which is the separator of neon if multiple Set-Cookie
* headers are there.
* The following code parses a string like this:
* Set-Cookie: 50ace6bd8a669=p537brtt048jh8srlp2tuep7em95nh9u98mj992fbqc47d1aecp1;
*/
sc = set_cookie_header;
while(sc) {
const char *sc_val = sc;
const char *sc_end = sc_val;
int cnt = 0;
int len = strlen(sc_val); /* The length of the rest of the header string. */
while( cnt < len && *sc_end != ';' && *sc_end != ',') {
cnt++;
sc_end++;
}
if( cnt == len ) {
/* exit: We are at the end. */
sc = NULL;
} else if( *sc_end == ';' ) {
/* We are at the end of the session key. */
int keylen = sc_end-sc_val;
if( key ) {
int oldlen = strlen(key);
key = c_realloc(key, oldlen + 2 + keylen+1);
strcpy(key + oldlen, "; ");
strncpy(key + oldlen + 2, sc_val, keylen);
key[oldlen + 2 + keylen] = '\0';
} else {
key = c_malloc(keylen+1);
strncpy( key, sc_val, keylen );
key[keylen] = '\0';
}
/* now search for a ',' to find a potential other header entry */
while(cnt < len && *sc_end != ',') {
cnt++;
sc_end++;
}
if( cnt < len )
sc = sc_end+2; /* mind the space after the comma */
else
sc = NULL;
} else if( *sc_end == ',' ) {
/* A new entry is to check. */
if( *(sc_end + 1) == ' ') {
sc = sc_end+2;
} else {
/* error condition */
sc = NULL;
}
}
}
}
} else {
DEBUG_WEBDAV("Request failed, don't take session header.");
}
if( key ) {
DEBUG_WEBDAV("----> Session-key: %s", key);
SAFE_FREE(ctx->dav_session.session_key);
ctx->dav_session.session_key = key;
}
}
/*
* this hook is called just after a request has been created, before its sent.
* Here it is used to set the proxy connection header if available.
*/
static void request_created_hook(ne_request *req, void *userdata,
const char *method, const char *requri)
{
// FIXME Can possibly be merged with pre_send_hook
csync_owncloud_ctx_t *ctx = (csync_owncloud_ctx_t *) userdata;
(void) method;
(void) requri;
if( !req ) return;
if(ctx->dav_session.proxy_type) {
/* required for NTLM */
ne_add_request_header(req, "Proxy-Connection", "Keep-Alive");
}
}
/*
* this hook is called just before a request has been sent.
* Here it is used to set the session cookie if available.
*/
static void pre_send_hook(ne_request *req, void *userdata,
ne_buffer *header)
{
csync_owncloud_ctx_t *ctx = (csync_owncloud_ctx_t *) userdata;
if( !req ) return;
if(ctx->dav_session.session_key) {
ne_buffer_concat(header, "Cookie: ", ctx->dav_session.session_key, "\r\n", NULL);
} else {
DEBUG_WEBDAV("csync pre_send_hook We don't have a Auth Cookie (session_key), this is wrong!");
}
}
static int post_send_hook(ne_request *req, void *userdata,
const ne_status *status)
{
const char *location;
csync_owncloud_ctx_t *ctx = (csync_owncloud_ctx_t *) userdata;
(void) status;
location = ne_get_response_header(req, "Location");
if( !location ) return NE_OK;
if( ctx->dav_session.redir_callback ) {
if( ctx->dav_session.redir_callback( ctx->csync_ctx, location ) ) {
return NE_REDIRECT;
} else {
return NE_RETRY;
}
}
return NE_REDIRECT;
}
/*
* Connect to a DAV server
* This function sets the flag _connected if the connection is established
* and returns if the flag is set, so calling it frequently is save.
*/
int dav_connect(CSYNC *csyncCtx, const char *base_url) {
int useSSL = 0;
int rc;
char protocol[6] = {'\0'};
char uaBuf[256];
char *path = NULL;
char *scheme = NULL;
char *host = NULL;
unsigned int port = 0;
int proxystate = -1;
csync_owncloud_ctx_t *ctx = csyncCtx->owncloud_context;
struct csync_client_certs_s* clientCerts = csyncCtx->clientCerts;
if (ctx->_connected) {
return 0;
}
rc = c_parse_uri( base_url, &scheme,
&ctx->dav_session.user,
&ctx->dav_session.pwd,
&host, &port, &path );
if( rc < 0 ) {
DEBUG_WEBDAV("Failed to parse uri %s", base_url );
goto out;
}
DEBUG_WEBDAV("* scheme %s", scheme );
DEBUG_WEBDAV("* host %s", host );
DEBUG_WEBDAV("* port %u", port );
DEBUG_WEBDAV("* path %s", path );
if( strcmp( scheme, "owncloud" ) == 0 || strcmp( scheme, "http" ) == 0 ) {
strcpy( protocol, "http");
} else if( strcmp( scheme, "ownclouds" ) == 0 || strcmp( scheme, "https") == 0 ) {
strcpy( protocol, "https");
useSSL = 1;
} else {
DEBUG_WEBDAV("Invalid scheme %s, go out here!", scheme );
rc = -1;
goto out;
}
DEBUG_WEBDAV("* user %s", ctx->dav_session.user ? ctx->dav_session.user : "");
if (port == 0) {
port = ne_uri_defaultport(protocol);
}
ctx->dav_session.ctx = ne_session_create( protocol, host, port);
if (ctx->dav_session.ctx == NULL) {
DEBUG_WEBDAV("Session create with protocol %s failed", protocol );
rc = -1;
goto out;
}
if (ctx->dav_session.read_timeout != 0) {
ne_set_read_timeout(ctx->dav_session.ctx, ctx->dav_session.read_timeout);
DEBUG_WEBDAV("Timeout set to %u seconds", ctx->dav_session.read_timeout );
}
// Should never take more than some seconds, 30 is really a max.
ne_set_connect_timeout(ctx->dav_session.ctx, 30);
snprintf( uaBuf, sizeof(uaBuf), "Mozilla/5.0 (%s) mirall/%s (csyncoC)",
CSYNC_STRINGIFY( MIRALL_VERSION ), csync_owncloud_get_platform() );
ne_set_useragent( ctx->dav_session.ctx, uaBuf);
ne_set_server_auth(ctx->dav_session.ctx, authentication_callback_by_neon, ctx);
if( useSSL ) {
if (!ne_has_support(NE_FEATURE_SSL)) {
DEBUG_WEBDAV("Error: SSL is not enabled.");
rc = -1;
goto out;
}
if(clientCerts != NULL) {
ne_ssl_client_cert *clicert;
DEBUG_WEBDAV("dav_connect: certificatePath and certificatePasswd are set, so we use it" );
DEBUG_WEBDAV(" with certificatePath: %s", clientCerts->certificatePath );
DEBUG_WEBDAV(" with certificatePasswd: %s", clientCerts->certificatePasswd );
clicert = ne_ssl_clicert_read ( clientCerts->certificatePath );
if ( clicert == NULL ) {
DEBUG_WEBDAV ( "Error read certificate : %s", ne_get_error ( ctx->dav_session.ctx ) );
} else {
if ( ne_ssl_clicert_encrypted ( clicert ) ) {
int rtn = ne_ssl_clicert_decrypt ( clicert, clientCerts->certificatePasswd );
if ( !rtn ) {
DEBUG_WEBDAV ( "Certificate was deciphered successfully." );
ne_ssl_set_clicert ( ctx->dav_session.ctx, clicert );
} else {
DEBUG_WEBDAV ( "Errors while deciphering certificate: %s", ne_get_error ( ctx->dav_session.ctx ) );
}
}
}
} else {
DEBUG_WEBDAV("dav_connect: error with csync_client_certs_s* clientCerts");
}
ne_ssl_trust_default_ca( ctx->dav_session.ctx );
ne_ssl_set_verify( ctx->dav_session.ctx, ssl_callback_by_neon, ctx);
}
/* Hook called when a request is created. It sets the proxy connection header. */
ne_hook_create_request( ctx->dav_session.ctx, request_created_hook, ctx );
/* Hook called after response headers are read. It gets the Session ID. */
ne_hook_post_headers( ctx->dav_session.ctx, post_request_hook, ctx );
/* Hook called before a request is sent. It sets the cookies. */
ne_hook_pre_send( ctx->dav_session.ctx, pre_send_hook, ctx );
/* Hook called after request is dispatched. Used for handling possible redirections. */
ne_hook_post_send( ctx->dav_session.ctx, post_send_hook, ctx );
/* Proxy support */
proxystate = configureProxy( ctx, ctx->dav_session.ctx );
if( proxystate < 0 ) {
DEBUG_WEBDAV("Error: Proxy-Configuration failed.");
} else if( proxystate > 0 ) {
ne_set_proxy_auth( ctx->dav_session.ctx, proxy_authentication_callback_by_neon, ctx );
}
ctx->_connected = 1;
rc = 0;
out:
SAFE_FREE(path);
SAFE_FREE(host);
SAFE_FREE(scheme);
return rc;
}
char *owncloud_error_string(CSYNC* ctx)
{
return ctx->owncloud_context->dav_session.error_string;
}
int owncloud_commit(CSYNC* ctx) {
if (!ctx->owncloud_context) {
return 0;
}
if( ctx->owncloud_context->dav_session.ctx ) {
ne_forget_auth(ctx->owncloud_context->dav_session.ctx);
ne_session_destroy(ctx->owncloud_context->dav_session.ctx );
ctx->owncloud_context->dav_session.ctx = 0;
}
/* DEBUG_WEBDAV( "********** vio_module_shutdown" ); */
ctx->owncloud_context->dav_session.ctx = 0;
// ne_sock_exit();
ctx->owncloud_context->_connected = 0; /* triggers dav_connect to go through the whole neon setup */
SAFE_FREE( ctx->owncloud_context->dav_session.user );
SAFE_FREE( ctx->owncloud_context->dav_session.pwd );
SAFE_FREE( ctx->owncloud_context->dav_session.session_key);
SAFE_FREE( ctx->owncloud_context->dav_session.error_string );
return 0;
}
void owncloud_destroy(CSYNC* ctx)
{
owncloud_commit(ctx);
SAFE_FREE(ctx->owncloud_context);
if (ctx->clientCerts) {
SAFE_FREE(ctx->clientCerts->certificatePasswd);
SAFE_FREE(ctx->clientCerts->certificatePath);
SAFE_FREE(ctx->clientCerts);
}
ne_sock_exit();
}
int owncloud_set_property(CSYNC* ctx, const char *key, void *data) {
#define READ_STRING_PROPERTY(P) \
if (c_streq(key, #P)) { \
SAFE_FREE(ctx->owncloud_context->dav_session.P); \
ctx->owncloud_context->dav_session.P = c_strdup((const char*)data); \
return 0; \
}
READ_STRING_PROPERTY(session_key)
READ_STRING_PROPERTY(proxy_type)
READ_STRING_PROPERTY(proxy_host)
READ_STRING_PROPERTY(proxy_user)
READ_STRING_PROPERTY(proxy_pwd)
#undef READ_STRING_PROPERTY
if (c_streq(key, "proxy_port")) {
ctx->owncloud_context->dav_session.proxy_port = *(int*)(data);
return 0;
}
if (c_streq(key, "read_timeout") || c_streq(key, "timeout")) {
ctx->owncloud_context->dav_session.read_timeout = *(int*)(data);
return 0;
}
if( c_streq(key, "get_dav_session")) {
/* Give the ne_session to the caller */
*(ne_session**)data = ctx->owncloud_context->dav_session.ctx;
return 0;
}
if( c_streq(key, "redirect_callback")) {
if (data) {
csync_owncloud_redirect_callback_t* cb_wrapper = data;
ctx->owncloud_context->dav_session.redir_callback = *cb_wrapper;
} else {
ctx->owncloud_context->dav_session.redir_callback = NULL;
}
}
if( c_streq(key, "SSLClientCerts")) {
if(ctx->clientCerts != NULL) {
SAFE_FREE(ctx->clientCerts->certificatePasswd);
SAFE_FREE(ctx->clientCerts->certificatePath);
SAFE_FREE(ctx->clientCerts);
ctx->clientCerts = NULL;
}
if (data) {
struct csync_client_certs_s* clientCerts = (struct csync_client_certs_s*) data;
struct csync_client_certs_s* newCerts = c_malloc(sizeof(struct csync_client_certs_s));
newCerts->certificatePath = c_strdup(clientCerts->certificatePath);
newCerts->certificatePasswd = c_strdup(clientCerts->certificatePasswd);
ctx->clientCerts = newCerts;
} else {
DEBUG_WEBDAV("error: in owncloud_set_property for 'SSLClientCerts'" );
}
}
return -1;
}
void owncloud_init(CSYNC* ctx) {
ne_sock_init();
ctx->owncloud_context = c_malloc( sizeof( struct csync_owncloud_ctx_s ));
ctx->owncloud_context->csync_ctx = ctx; // back reference
}
/* vim: set ts=4 sw=4 et cindent: */

View file

@@ -1,36 +0,0 @@
/*
* libcsync -- a library to sync a directory with another
*
* Copyright (c) 2011 by Andreas Schneider <asn@cryptomilk.org>
* Copyright (c) 2012 by Klaas Freitag <freitag@owncloud.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef CSYNC_OWNCLOUD_H
#define CSYNC_OWNCLOUD_H
#include "csync.h"
#include "vio/csync_vio.h"
// Public API used by csync
int owncloud_commit(CSYNC* ctx);
void owncloud_destroy(CSYNC* ctx);
char *owncloud_error_string(CSYNC* ctx);
int owncloud_set_property(CSYNC* ctx, const char *key, void *data);
void owncloud_init(CSYNC* ctx);
int dav_connect(CSYNC* ctx, const char *base_url);
#endif /* CSYNC_OWNCLOUD_H */

View file

@@ -1,115 +0,0 @@
/*
* libcsync -- a library to sync a directory with another
*
* Copyright (c) 2011 by Andreas Schneider <asn@cryptomilk.org>
* Copyright (c) 2012 by Klaas Freitag <freitag@owncloud.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef CSYNC_OWNCLOUD_PRIVATE_H
#define CSYNC_OWNCLOUD_PRIVATE_H
#include <errno.h>
#include <stdio.h>
#include <time.h>
#include <limits.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include "config_csync.h"
#ifdef NEON_WITH_LFS /* Switch on LFS in libneon. Never remove the NE_LFS! */
#define NE_LFS
#endif
#include <neon/ne_basic.h>
#include <neon/ne_socket.h>
#include <neon/ne_session.h>
#include <neon/ne_request.h>
#include <neon/ne_props.h>
#include <neon/ne_auth.h>
#include <neon/ne_dates.h>
#include <neon/ne_compress.h>
#include <neon/ne_redirect.h>
#include "c_rbtree.h"
#include "c_lib.h"
#include "csync.h"
#include "csync_misc.h"
#include "csync_macros.h"
#include "c_private.h"
#include "vio/csync_vio.h"
#include "csync_log.h"
#include "csync_owncloud.h"
#define DEBUG_WEBDAV(...) csync_log( 9, "oc_module", __VA_ARGS__);
typedef int (*csync_owncloud_redirect_callback_t)(CSYNC* ctx, const char* uri);
/* Struct with the WebDAV session */
struct dav_session_s {
ne_session *ctx;
char *user;
char *pwd;
char *proxy_type;
char *proxy_host;
int proxy_port;
char *proxy_user;
char *proxy_pwd;
char *session_key;
char *error_string;
int read_timeout;
csync_owncloud_redirect_callback_t redir_callback;
};
struct csync_owncloud_ctx_s {
CSYNC *csync_ctx;
// For the WebDAV connection
struct dav_session_s dav_session; /* The DAV Session, initialised in dav_connect */
int _connected; /* flag to indicate if a connection exists, ie.
the dav_session is valid */
};
typedef struct csync_owncloud_ctx_s csync_owncloud_ctx_t;
//typedef csync_owncloud_ctx_t* csync_owncloud_ctx_p;
void set_errno_from_http_errcode( int err );
void set_error_message( csync_owncloud_ctx_t *ctx, const char *msg );
void set_errno_from_neon_errcode(csync_owncloud_ctx_t *ctx, int neon_code );
int http_result_code_from_session(csync_owncloud_ctx_t *ctx);
void set_errno_from_session(csync_owncloud_ctx_t *ctx);
time_t oc_httpdate_parse( const char *date );
const char* csync_owncloud_get_platform(void);
char *_cleanPath( const char* uri );
#endif // CSYNC_OWNCLOUD_PRIVATE_H

View file

@@ -1,125 +0,0 @@
/*
* libcsync -- a library to sync a directory with another
*
* Copyright (c) 2011 by Andreas Schneider <asn@cryptomilk.org>
* Copyright (c) 2012 by Klaas Freitag <freitag@owncloud.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "csync_owncloud.h"
#include "csync_owncloud_private.h"
#include "csync_misc.h"
void set_errno_from_http_errcode( int err ) {
int new_errno = 0;
switch(err) {
case 200: /* OK */
case 201: /* Created */
case 202: /* Accepted */
case 203: /* Non-Authoritative Information */
case 204: /* No Content */
case 205: /* Reset Content */
case 207: /* Multi-Status */
case 304: /* Not Modified */
new_errno = 0;
break;
case 401: /* Unauthorized */
case 402: /* Payment Required */
case 407: /* Proxy Authentication Required */
case 405:
new_errno = EPERM;
break;
case 301: /* Moved Permanently */
case 303: /* See Other */
case 404: /* Not Found */
case 410: /* Gone */
new_errno = ENOENT;
break;
case 408: /* Request Timeout */
case 504: /* Gateway Timeout */
new_errno = EAGAIN;
break;
case 423: /* Locked */
new_errno = EACCES;
break;
case 400: /* Bad Request */
case 403: /* Forbidden */
case 409: /* Conflict */
case 411: /* Length Required */
case 412: /* Precondition Failed */
case 414: /* Request-URI Too Long */
case 415: /* Unsupported Media Type */
case 424: /* Failed Dependency */
case 501: /* Not Implemented */
new_errno = EINVAL;
break;
case 507: /* Insufficient Storage */
new_errno = ENOSPC;
break;
case 206: /* Partial Content */
case 300: /* Multiple Choices */
case 302: /* Found */
case 305: /* Use Proxy */
case 306: /* (Unused) */
case 307: /* Temporary Redirect */
case 406: /* Not Acceptable */
case 416: /* Requested Range Not Satisfiable */
case 417: /* Expectation Failed */
case 422: /* Unprocessable Entity */
case 500: /* Internal Server Error */
case 502: /* Bad Gateway */
case 505: /* HTTP Version Not Supported */
new_errno = EIO;
break;
case 503: /* Service Unavailable */
new_errno = ERRNO_SERVICE_UNAVAILABLE;
break;
case 413: /* Request Entity too Large */
new_errno = EFBIG;
break;
default:
new_errno = EIO;
}
errno = new_errno;
}
// as per http://sourceforge.net/p/predef/wiki/OperatingSystems/
// extend as required
const char* csync_owncloud_get_platform() {
#if defined (_WIN32)
return "Windows";
#elif defined(__APPLE__)
return "Macintosh";
#elif defined(__gnu_linux__)
return "Linux";
#elif defined(__DragonFly__)
/* might also define __FreeBSD__ */
return "DragonFlyBSD";
#elif defined(__FreeBSD__)
return "FreeBSD";
#elif defined(__NetBSD__)
return "NetBSD";
#elif defined(__OpenBSD__)
return "OpenBSD";
#elif defined(sun) || defined(__sun)
return "Solaris";
#else
return "Unknown OS";
#endif
}

View file

@@ -77,9 +77,6 @@ enum csync_replica_e {
typedef struct csync_file_stat_s csync_file_stat_t;
struct csync_owncloud_ctx_s; // csync_owncloud.c
/**
* @brief csync public structure
*/
@@ -169,9 +166,6 @@ struct csync_s {
bool db_is_empty;
bool ignore_hidden_files;
struct csync_owncloud_ctx_s *owncloud_context;
};

View file

@@ -1,57 +0,0 @@
project(httpbflib C)
find_package(Neon)
set(HTTPBF_PUBLIC_INCLUDE_DIRS
${CMAKE_CURRENT_SOURCE_DIR}/src
${NEON_INCLUDE_DIRS}
CACHE INTERNAL "httpbflib public include directories"
)
set(HTTPBF_LIBRARY
httpbf
CACHE INTERNAL "httpbf library"
)
set(HTTPBF_LINK_LIBRARIES
${HTTPBF_LIBRARY}
)
set(httpbf_SRCS
src/httpbf.c
)
set(httpbf_HEADERS
src/httpbf.h
)
include_directories(
${HTTPBF_PUBLIC_INCLUDE_DIRS}
)
add_library(${HTTPBF_LIBRARY} STATIC ${httpbf_SRCS})
target_link_libraries(${HTTPBF_LIBRARY} ${NEON_LIBRARIES})
if(NOT WIN32)
add_definitions( -fPIC )
endif()
INSTALL(
TARGETS
${HTTPBF_LIBRARY}
LIBRARY DESTINATION
${LIB_INSTALL_DIR}
ARCHIVE DESTINATION
${LIB_INSTALL_DIR}
RUNTIME DESTINATION
${BIN_INSTALL_DIR}
)
if (NOT APPLE)
INSTALL(
FILES
${httpbf_HEADERS}
DESTINATION
${CMAKE_INSTALL_INCLUDEDIR}
)
endif (NOT APPLE)

View file

@@ -1,29 +0,0 @@
This is a little code that does ownCloud file chunking.
Basically to put a local file to an url:
(Also see the client example code in client dir.)
/* Initialize the transfer, get a transfer struct. */
hbf_transfer_t *trans = hbf_init_transfer( url );
Hbf_State state;
if( trans ) {
int fd = open_local_file( file );
/* create a neon session to use for the transfer */
ne_session *session = create_neon_session(uri);
if( session && fd > -1 ) {
/* Prepare the list of chunks, ie. calculate chunks and write back to trans. */
state = hbf_splitlist(trans, fd);
if( state == HBF_SUCCESS ) {
/* Transfer all the chunks through the HTTP session using PUT. */
state = hbf_transfer( session, trans, "PUT" );
}
}
}
GET a large file:
Do GET Range requests.
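A minimal sketch of such a ranged GET through libneon's ne_get_range(); the host, remote path, output file name and the 10 MiB range are illustrative assumptions (the size merely mirrors httpbf's DEFAULT_BLOCK_SIZE):
/* Sketch only: fetch the first 10 MiB of a large remote file with a Range GET. */
#include <fcntl.h>
#include <unistd.h>
#include <neon/ne_session.h>
#include <neon/ne_request.h>
#include <neon/ne_basic.h>
static int get_first_chunk(void)
{
    /* host, path and output name are assumptions for illustration */
    ne_session *sess = ne_session_create("http", "example.com", 80);
    ne_content_range range = { 0, 10*1024*1024 - 1, 0 }; /* bytes 0 .. 10 MiB-1 */
    int rc = NE_ERROR;
    int fd = open("big.bin.part0", O_WRONLY | O_CREAT | O_TRUNC, 0600);
    if( fd > -1 ) {
        /* ne_get_range() issues a GET with a Range header and writes the body to fd */
        rc = ne_get_range(sess, "/remote.php/webdav/big.bin", &range, fd);
        close(fd);
    }
    ne_session_destroy(sess);
    return rc == NE_OK ? 0 : -1;
}
To pull a whole file this way one would presumably loop, advancing range.start and range.end by the block size until the known length is reached.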

View file

@@ -1,36 +0,0 @@
project(client C)
set(CLIENT_EXECUTABLE httpbfclient CACHE INTERNAL "httpbf client")
set(CLIENT_LINK_LIBRARIES ${NEON_LIBRARIES} ${HBF_LIBRARY})
set(HTTPBF_INCLUDE_DIR ${CMAKE_SOURCE_DIR}/src})
if(NOT LINUX)
list(APPEND CLIENT_LINK_LIBRARIES ${ARGP_LIBRARIES})
endif()
set(client_SRCS
httpbf_client.c
)
include_directories(
${HTTPBF_INCLUDE_DIR}
${HTTPBF_PUBLIC_INCLUDE_DIRS}
)
add_executable(${CLIENT_EXECUTABLE} ${client_SRCS})
target_link_libraries(${CLIENT_EXECUTABLE} ${CLIENT_LINK_LIBRARIES})
set_target_properties(
${CLIENT_EXECUTABLE}
PROPERTIES
OUTPUT_NAME
httpbf
)
# install( TARGETS ${CLIENT_EXECUTABLE} DESTINATION ${BIN_INSTALL_DIR} )
install(TARGETS ${CLIENT_EXECUTABLE} DESTINATION bin)

View file

@@ -1,225 +0,0 @@
/*
* httpbf - send big files via http
*
* Copyright (c) 2012 Klaas Freitag <freitag@owncloud.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdbool.h>
#include <unistd.h>
#include <getopt.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <neon/ne_auth.h>
#include "httpbf.h"
/* Program documentation. */
static char doc[] = "Usage: httpbf [OPTION...] LOCAL REMOTEDIR\n\
httpbf - command line client to upload big files via http.\n\
\n\
Transfer a big file to a remote directory on ownCloud.\n\
\n\
-?, --help Give this help list\n\
--usage Give a short usage message\n\
-V, --version Print program version\n\
";
static char _user[NE_ABUFSIZ];
static char _pwd[NE_ABUFSIZ];
/* The options we understand. */
static const struct option long_options[] =
{
{"version", no_argument, 0, 'V' },
{"usage", no_argument, 0, 'h' },
{0, 0, 0, 0}
};
static const char* httpbf_version = "0.1";
static void print_version()
{
printf( "%s\n", httpbf_version );
exit(0);
}
static void print_help()
{
printf( "%s\n", doc );
exit(0);
}
static int ne_auth( void *userdata, const char *realm, int attempt,
char *username, char *password)
{
(void) userdata;
(void) realm;
if( username && password ) {
if( _user ) {
/* allow user without password */
if( strlen( _user ) < NE_ABUFSIZ ) {
strcpy( username, _user );
}
if( _pwd && strlen( _pwd ) < NE_ABUFSIZ ) {
strcpy( password, _pwd );
}
}
}
return attempt;
}
static int parse_args(int argc, char **argv)
{
while(optind < argc) {
int c = -1;
struct option *opt = NULL;
int result = getopt_long( argc, argv, "Vh", long_options, &c );
if( result == -1 ) {
break;
}
switch(result) {
case 'V':
print_version();
break;
case 'h':
print_help();
break;
case 0:
opt = (struct option*)&(long_options[c]);
if(strcmp(opt->name, "no-name-yet")) {
} else {
fprintf(stderr, "Argument: No idea what!\n");
}
break;
default:
break;
}
}
return optind;
}
static ne_session* create_neon_session( const char *url )
{
ne_uri uri;
ne_session *sess = NULL;
memset( _user, 0, NE_ABUFSIZ );
memset( _pwd, 0, NE_ABUFSIZ );
if( ne_uri_parse( url, &uri ) == 0 ) {
unsigned int port = ne_uri_defaultport(uri.scheme);
if( uri.userinfo ) {
char *slash = NULL;
strcpy( _user, uri.userinfo );
slash = strchr( _user, ':');
if( slash ) {
strcpy( _pwd, slash+1);
*slash = 0;
}
}
sess = ne_session_create(uri.scheme, uri.host, port);
ne_set_server_auth(sess, ne_auth, 0 );
ne_uri_free(&uri);
}
return sess;
}
static int open_local_file( const char *file )
{
int fd = -1;
if( !file ) return -1;
fd = open(file, O_RDONLY);
return fd;
}
static void transfer( const char* local, const char* uri )
{
if( !(local && uri )) return;
char *whole_url;
int len;
char *filename = basename(local);
if( ! filename ) {
return;
}
len = strlen(filename)+strlen(uri)+2;
whole_url = malloc( len );
strcpy(whole_url, uri);
strcat(whole_url, "/");
strcat(whole_url, filename);
hbf_transfer_t *trans = hbf_init_transfer( whole_url );
Hbf_State state;
if( trans ) {
ne_session *session = create_neon_session(uri);
if( session ) {
int fd = open_local_file( local );
if( fd > -1 ) {
state = hbf_splitlist(trans, fd );
if( state == HBF_SUCCESS ) {
state = hbf_transfer( session, trans, "PUT" );
}
}
ne_session_destroy(session);
}
}
if( state != HBF_SUCCESS ) {
printf("Upload failed: %s\n", hbf_error_string(state));
printf(" HTTP result %d, Server Error: %s\n",
trans->status_code, trans->error_string ? trans->error_string : "<empty>" );
}
/* Print the result of the recent transfer */
hbf_free_transfer( trans );
free(whole_url);
}
int main(int argc, char **argv) {
int rc = 0;
char errbuf[256] = {0};
parse_args(argc, argv);
/* two options must remain as source and target */
/* printf("ARGC: %d -> optind: %d\n", argc, optind ); */
if( argc - optind < 2 ) {
print_help();
}
transfer( argv[optind], argv[optind+1]);
}
/* vim: set ts=8 sw=2 et cindent: */

View file

@@ -1,38 +0,0 @@
project(httpbf C)
# where to look first for cmake modules, before ${CMAKE_ROOT}/Modules/ is checked
set(HTTPBF_PUBLIC_INCLUDE_DIRS
${CMAKE_CURRENT_SOURCE_DIR}
CACHE INTERNAL "httpbf public include directories"
)
set(HTTPBFLIB_PRIVATE_INCLUDE_DIRS
)
set(HBF_LIBRARY
httpbf
CACHE INTERNAL "httpbflib library"
)
set(HTTPBFLIB_LINK_LIBRARIES
${HBF_LIBRARY}
)
set(httpbflib_SRCS
httpbf.c
)
include_directories(
${NEON_INCLUDE_DIRS}
${GLIB2_INCLUDE_DIRS}
)
if(NOT WIN32)
add_definitions( -fPIC )
endif()
add_library(${HBF_LIBRARY} SHARED ${httpbflib_SRCS} )
target_link_libraries(${HBF_LIBRARY} ${NEON_LIBRARIES})

View file

@@ -1,688 +0,0 @@
/*
* httpbf - send big files via http
*
* Copyright (c) 2012 Klaas Freitag <freitag@owncloud.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/timeb.h>
#include <sys/time.h>
#include <inttypes.h>
#include "httpbf.h"
#include <neon/ne_session.h>
#include <neon/ne_request.h>
#include <neon/ne_basic.h>
// #ifdef NDEBUG
// #define DEBUG_HBF(...)
// #else
#define DEBUG_HBF(...) { if(transfer->log_cb) { \
char buf[1024]; \
snprintf(buf, 1024, __VA_ARGS__); \
transfer->log_cb(__func__, buf, transfer->user_data); \
} }
// #endif
#define DEFAULT_BLOCK_SIZE (10*1024*1024)
/* Platform specific defines go here. */
#ifdef _WIN32
#define _hbf_fstat _fstat64
typedef struct stat64 hbf_stat_t;
#else
#define _hbf_fstat fstat
typedef struct stat hbf_stat_t;
#endif
static int transfer_id( hbf_stat_t *sb ) {
struct timeval tp;
int res;
int r;
if( gettimeofday(&tp, 0) < 0 ) {
return 0;
}
/* build a Unique ID:
* take the current epoch and shift 8 bits up to keep the least bits.
* than add the milliseconds, again shift by 8
* and finally add the least 8 bit of the inode of the file.
*/
res = tp.tv_sec; /* epoche value in seconds */
res = res << 8;
r = (sb->st_ino & 0xFF);
res += r; /* least six bit of inode */
res = res << sizeof(tp.tv_usec);
res += tp.tv_usec; /* milliseconds */
return res;
}
hbf_transfer_t *hbf_init_transfer( const char *dest_uri ) {
hbf_transfer_t * transfer = NULL;
transfer = malloc( sizeof(hbf_transfer_t) );
memset(transfer, 0, sizeof(hbf_transfer_t));
/* store the target uri */
transfer->url = strdup(dest_uri);
transfer->status_code = 200;
transfer->error_string = NULL;
transfer->start_id = 0;
transfer->block_size = DEFAULT_BLOCK_SIZE;
transfer->threshold = transfer->block_size;
transfer->modtime_accepted = 0;
transfer->oc_header_modtime = 0;
return transfer;
}
/* Create the splitlist of a given file descriptor */
Hbf_State hbf_splitlist(hbf_transfer_t *transfer, int fd ) {
hbf_stat_t sb;
int64_t num_blocks;
int64_t blk_size;
int64_t remainder = 0;
if( ! transfer ) {
return HBF_PARAM_FAIL;
}
if( fd <= 0 ) {
DEBUG_HBF("File descriptor is invalid.");
return HBF_PARAM_FAIL;
}
if( _hbf_fstat(fd, &sb) < 0 ) {
DEBUG_HBF("Failed to stat the file descriptor: errno = %d", errno);
return HBF_FILESTAT_FAIL;
}
/* Store the file characteristics. */
transfer->fd = fd;
transfer->stat_size = sb.st_size;
transfer->modtime = sb.st_mtime;
transfer->previous_etag = NULL;
#ifndef NDEBUG
transfer->calc_size = 0;
#endif
DEBUG_HBF("block_size: %" PRId64 " threshold: %" PRId64 " st_size: %" PRId64, transfer->block_size, transfer->threshold, sb.st_size );
/* calc the number of blocks to split in */
blk_size = transfer->block_size;
if (sb.st_size < transfer->threshold) {
blk_size = transfer->threshold;
}
num_blocks = sb.st_size / blk_size;
/* there migth be a remainder. */
remainder = sb.st_size - num_blocks * blk_size;
/* if there is a remainder, add one block */
if( remainder > 0 ) {
num_blocks++;
}
/* The file has size 0. There still needs to be at least one block. */
if( sb.st_size == 0 ) {
num_blocks = 1;
blk_size = 0;
}
DEBUG_HBF("num_blocks: %" PRId64 " rmainder: %" PRId64 " blk_size: %" PRId64, num_blocks, remainder, blk_size );
if( num_blocks ) {
int cnt;
int64_t overall = 0;
/* create a datastructure for the transfer data */
transfer->block_arr = calloc(num_blocks, sizeof(hbf_block_t*));
transfer->block_cnt = num_blocks;
transfer->transfer_id = transfer_id(&sb);
transfer->start_id = 0;
for( cnt=0; cnt < num_blocks; cnt++ ) {
/* allocate a block struct and fill */
hbf_block_t *block = malloc( sizeof(hbf_block_t) );
memset(block, 0, sizeof(hbf_block_t));
block->seq_number = cnt;
if( cnt > 0 ) {
block->start = cnt * blk_size;
}
block->size = blk_size;
block->state = HBF_NOT_TRANSFERED;
/* consider the remainder if we're already at the end */
if( cnt == num_blocks-1 && remainder > 0 ) {
block->size = remainder;
}
overall += block->size;
/* store the block data into the result array in the transfer */
*((transfer->block_arr)+cnt) = block;
DEBUG_HBF("created block %d (start: %" PRId64 " size: %" PRId64 ")", cnt, block->start, block->size );
}
#ifndef NDEBUG
transfer->calc_size = overall;
#endif
}
return HBF_SUCCESS;
}
void hbf_free_transfer( hbf_transfer_t *transfer ) {
int cnt;
if( !transfer ) return;
for( cnt = 0; cnt < transfer->block_cnt; cnt++ ) {
hbf_block_t *block = transfer->block_arr[cnt];
if( !block ) continue;
if( block->http_error_msg ) free( block->http_error_msg );
if( block->etag ) free( block->etag );
}
free( transfer->block_arr );
free( transfer->url );
free( transfer->file_id );
if( transfer->error_string) free( (void*) transfer->error_string );
free( transfer );
}
static char* get_transfer_url( hbf_transfer_t *transfer, int indx ) {
char *res = NULL;
hbf_block_t *block = NULL;
if( ! transfer ) return NULL;
if( indx >= transfer->block_cnt ) return NULL;
block = transfer->block_arr[indx];
if( ! block ) return NULL;
if( transfer->block_cnt == 1 ) {
/* Just one chunk. We send as an ordinary request without chunking. */
res = strdup( transfer->url );
} else {
char trans_id_str[32];
char trans_block_str[32];
char indx_str[32];
int len = 1; /* trailing zero. */
int tlen = 0;
tlen = sprintf( trans_id_str, "%u", transfer->transfer_id );
if( tlen < 0 ) {
return NULL;
}
len += tlen;
tlen = sprintf( trans_block_str, "%u", transfer->block_cnt );
if( tlen < 0 ) {
return NULL;
}
len += tlen;
tlen = sprintf( indx_str, "%u", indx );
if( tlen < 0 ) {
return NULL;
}
len += tlen;
len += strlen(transfer->url);
len += strlen("-chunking---");
res = malloc(len);
/* Note: must be %u for unsigned because one does not want '--' */
if( sprintf(res, "%s-chunking-%u-%u-%u", transfer->url, transfer->transfer_id,
transfer->block_cnt, indx ) < 0 ) {
return NULL;
}
}
return res;
}
/*
* perform one transfer of one block.
* returns HBF_TRANSFER_SUCCESS if the transfer of this block was a success
* returns HBF_SUCCESS if the server aknoweldge that he received all the blocks
*/
static int _hbf_dav_request(hbf_transfer_t *transfer, ne_request *req, int fd, hbf_block_t *blk ) {
Hbf_State state = HBF_TRANSFER_SUCCESS;
int res;
const ne_status *req_status = NULL;
const char *etag = NULL;
(void) transfer;
if( ! (blk && req) ) return HBF_PARAM_FAIL;
ne_set_request_body_fd(req, fd, blk->start, blk->size);
DEBUG_HBF("Block: %d , Start: %" PRId64 " and Size: %" PRId64 "", blk->seq_number, blk->start, blk->size );
res = ne_request_dispatch(req);
req_status = ne_get_status( req );
switch(res) {
case NE_OK:
blk->state = HBF_TRANSFER_FAILED;
state = HBF_FAIL;
etag = 0;
if( req_status->klass == 2 ) {
state = HBF_TRANSFER_SUCCESS;
blk->state = HBF_TRANSFER_SUCCESS;
etag = ne_get_response_header(req, "ETag");
if (etag && etag[0]) {
/* When there is an etag, it means the transfer was complete */
state = HBF_SUCCESS;
if( etag[0] == '"' && etag[ strlen(etag)-1] == '"') {
int len = strlen( etag )-2;
blk->etag = malloc( len+1 );
strncpy( blk->etag, etag+1, len );
blk->etag[len] = '\0';
} else {
blk->etag = strdup( etag );
}
} else {
/* DEBUG_HBF("OOOOOOOO No etag returned!"); */
}
/* check if the server was able to set the mtime already. */
etag = ne_get_response_header(req, "X-OC-MTime");
if( etag && strcmp(etag, "accepted") == 0 ) {
/* the server acknowledged that the mtime was set. */
transfer->modtime_accepted = 1;
}
etag = ne_get_response_header(req, "OC-FileID");
if( etag ) {
transfer->file_id = strdup( etag );
}
}
break;
case NE_AUTH:
state = HBF_AUTH_FAIL;
blk->state = HBF_TRANSFER_FAILED;
break;
case NE_PROXYAUTH:
state = HBF_PROXY_AUTH_FAIL;
blk->state = HBF_TRANSFER_FAILED;
break;
case NE_CONNECT:
state = HBF_CONNECT_FAIL;
blk->state = HBF_TRANSFER_FAILED;
break;
case NE_TIMEOUT:
state = HBF_TIMEOUT_FAIL;
blk->state = HBF_TRANSFER_FAILED;
break;
case NE_ERROR:
state = HBF_FAIL;
blk->state = HBF_TRANSFER_FAILED;
break;
}
blk->http_result_code = req_status->code;
if( req_status->reason_phrase ) {
blk->http_error_msg = strdup(req_status->reason_phrase);
}
return state;
}
Hbf_State hbf_validate_source_file( hbf_transfer_t *transfer ) {
Hbf_State state = HBF_SUCCESS;
hbf_stat_t sb;
if( transfer == NULL ) {
state = HBF_PARAM_FAIL;
}
if( state == HBF_SUCCESS ) {
if( transfer->fd <= 0 ) {
state = HBF_PARAM_FAIL;
}
}
if( state == HBF_SUCCESS ) {
int rc = _hbf_fstat( transfer->fd, &sb );
if( rc != 0 ) {
state = HBF_STAT_FAIL;
}
}
if( state == HBF_SUCCESS ) {
if( sb.st_mtime != transfer->modtime || sb.st_size != transfer->stat_size ) {
state = HBF_SOURCE_FILE_CHANGE;
}
}
return state;
}
/* Get the HTTP error code for the last request */
static int _hbf_http_error_code(ne_session *session) {
const char *msg = ne_get_error( session );
char *msg2;
int err;
err = strtol(msg, &msg2, 10);
if (msg == msg2) {
err = 0;
}
return err;
}
static Hbf_State _hbf_transfer_no_chunk(ne_session *session, hbf_transfer_t *transfer, const char *verb) {
int res;
const ne_status* req_status;
ne_request *req = ne_request_create(session, verb ? verb : "PUT", transfer->url);
if (!req)
return HBF_MEMORY_FAIL;
ne_add_request_header( req, "Content-Type", "application/octet-stream");
ne_set_request_body_fd(req, transfer->fd, 0, transfer->stat_size);
DEBUG_HBF("HBF: chunking not supported for %s", transfer->url);
res = ne_request_dispatch(req);
req_status = ne_get_status( req );
if (res == NE_OK && req_status->klass == 2) {
ne_request_destroy(req);
return HBF_SUCCESS;
}
if( transfer->error_string ) free( transfer->error_string );
transfer->error_string = strdup( ne_get_error(session) );
transfer->status_code = req_status->code;
ne_request_destroy(req);
return HBF_FAIL;
}
Hbf_State hbf_transfer( ne_session *session, hbf_transfer_t *transfer, const char *verb ) {
Hbf_State state = HBF_TRANSFER_SUCCESS;
int cnt;
if( ! session ) {
state = HBF_SESSION_FAIL;
}
if( ! transfer ) {
state = HBF_SPLITLIST_FAIL;
}
if( ! verb ) {
state = HBF_PARAM_FAIL;
}
if(state == HBF_TRANSFER_SUCCESS) {
DEBUG_HBF("%s request to %s", verb, transfer->url);
}
for( cnt=0; state == HBF_TRANSFER_SUCCESS && cnt < transfer->block_cnt; cnt++ ) {
/* cnt goes from O to block_cnt, but block_id starts at start_id and wrap around
* That way if we have not finished uploaded when we reach block_cnt, we re-upload
* the beginning of the file that the server did not have in cache anymore.
*/
int block_id = (cnt + transfer->start_id) % transfer->block_cnt;
hbf_block_t *block = transfer->block_arr[block_id];
char *transfer_url = NULL;
if( ! block ) state = HBF_PARAM_FAIL;
if( transfer->abort_cb ) {
int do_abort = (transfer->abort_cb)(transfer->user_data);
if( do_abort ) {
state = HBF_USER_ABORTED;
transfer->start_id = block_id % transfer->block_cnt;
}
}
if( state == HBF_TRANSFER_SUCCESS ) {
transfer_url = get_transfer_url( transfer, block_id );
if( ! transfer_url ) {
state = HBF_PARAM_FAIL;
}
}
if( state == HBF_TRANSFER_SUCCESS ) {
if( transfer->block_cnt > 1 && cnt > 0 ) {
/* The block count is > 1, check size and mtime before transmitting. */
state = hbf_validate_source_file(transfer);
if( state == HBF_SOURCE_FILE_CHANGE ) {
/* The source file has changed meanwhile */
}
}
}
if( state == HBF_TRANSFER_SUCCESS || state == HBF_SUCCESS ) {
ne_request *req = ne_request_create(session, verb, transfer_url);
if( req ) {
char buf[21];
snprintf(buf, sizeof(buf), "%"PRId64, transfer->stat_size);
ne_add_request_header(req, "OC-Total-Length", buf);
if( transfer->oc_header_modtime > 0 ) {
snprintf(buf, sizeof(buf), "%"PRId64, transfer->oc_header_modtime);
ne_add_request_header(req, "X-OC-Mtime", buf);
}
if( transfer->previous_etag ) {
ne_add_request_header(req, "If-Match", transfer->previous_etag);
}
if( transfer->block_cnt > 1 ) {
ne_add_request_header(req, "OC-Chunked", "1");
snprintf(buf, sizeof(buf), "%"PRId64, transfer->threshold);
ne_add_request_header(req, "OC-Chunk-Size", buf);
}
ne_add_request_header( req, "Content-Type", "application/octet-stream");
state = _hbf_dav_request(transfer, req, transfer->fd, block );
if( state != HBF_TRANSFER_SUCCESS && state != HBF_SUCCESS) {
if( transfer->error_string ) free( transfer->error_string );
transfer->error_string = strdup( ne_get_error(session) );
transfer->start_id = block_id % transfer->block_cnt;
/* Set the code of the last transmission. */
state = HBF_FAIL;
transfer->status_code = transfer->block_arr[block_id]->http_result_code;
}
ne_request_destroy(req);
if (transfer->block_cnt > 1 && state == HBF_SUCCESS && cnt == 0) {
/* Success on the first chunk is suspicious.
It could happen that the server did not support chunking */
int rc = ne_delete(session, transfer_url);
if (rc == NE_OK && _hbf_http_error_code(session) == 204) {
/* If delete suceeded, it means some proxy strips the OC_CHUNKING header
start again without chunking: */
free( transfer_url );
return _hbf_transfer_no_chunk(session, transfer, verb);
}
}
if (state == HBF_TRANSFER_SUCCESS && transfer->chunk_finished_cb) {
transfer->chunk_finished_cb(transfer, block_id, transfer->user_data);
}
} else {
state = HBF_MEMORY_FAIL;
}
}
free( transfer_url );
}
/* do the source file validation finally (again). */
if( state == HBF_TRANSFER_SUCCESS ) {
/* This means that no etag was returned on one of the chunks to indicate
* that the upload was finished. */
state = HBF_TRANSFER_NOT_ACKED;
}
return state;
}
int hbf_fail_http_code( hbf_transfer_t *transfer )
{
int cnt;
if( ! transfer ) return 0;
for( cnt = 0; cnt < transfer->block_cnt; cnt++ ) {
int block_id = (cnt + transfer->start_id) % transfer->block_cnt;
hbf_block_t *block = transfer->block_arr[block_id];
if( block->state != HBF_NOT_TRANSFERED && block->state != HBF_TRANSFER_SUCCESS ) {
return block->http_result_code;
}
}
return 200;
}
const char *hbf_transfer_etag( hbf_transfer_t *transfer )
{
int cnt;
const char *etag = NULL;
if( ! transfer ) return 0;
/* Loop over all parts and do a assertion that there is only one etag. */
for( cnt = 0; cnt < transfer->block_cnt; cnt++ ) {
int block_id = (cnt + transfer->start_id) % transfer->block_cnt;
hbf_block_t *block = transfer->block_arr[block_id];
if( block->etag ) {
if( etag && strcmp(etag, block->etag) != 0 ) {
/* multiple etags in the transfer, not equal. */
DEBUG_HBF( "WARN: etags are not equal in blocks of one single transfer." );
}
etag = block->etag;
}
}
return etag;
}
const char *hbf_transfer_file_id( hbf_transfer_t *transfer )
{
const char *re = NULL;
if(transfer) {
re = transfer->file_id;
}
return re;
}
const char *hbf_error_string(hbf_transfer_t *transfer, Hbf_State state)
{
const char *re;
int cnt;
switch( state ) {
case HBF_SUCCESS:
re = "Ok.";
break;
case HBF_NOT_TRANSFERED: /* never tried to transfer */
re = "Block transfer has not been attempted yet.";
break;
case HBF_TRANSFER: /* transfer currently running */
re = "Block is currently being transferred.";
break;
case HBF_TRANSFER_FAILED: /* transfer tried but failed */
re = "Block transfer failed.";
break;
case HBF_TRANSFER_SUCCESS: /* transfer succeeded. */
re = "Block transfer successful.";
break;
case HBF_SPLITLIST_FAIL: /* the file could not be split */
re = "Splitlist could not be computed.";
break;
case HBF_SESSION_FAIL:
re = "No valid session in transfer.";
break;
case HBF_FILESTAT_FAIL:
re = "Source file could not be stat'ed.";
break;
case HBF_PARAM_FAIL:
re = "Parameter fail.";
break;
case HBF_AUTH_FAIL:
re = "Authentication fail.";
break;
case HBF_PROXY_AUTH_FAIL:
re = "Proxy Authentication fail.";
break;
case HBF_CONNECT_FAIL:
re = "Connection could not be established.";
break;
case HBF_TIMEOUT_FAIL:
re = "Network timeout.";
break;
case HBF_MEMORY_FAIL:
re = "Out of memory.";
break;
case HBF_STAT_FAIL:
re = "Filesystem stat on file failed.";
break;
case HBF_SOURCE_FILE_CHANGE:
re = "Source file changed too often during upload.";
break;
case HBF_USER_ABORTED:
re = "Transmission aborted by user.";
break;
case HBF_TRANSFER_NOT_ACKED:
re = "The server did not provide an Etag.";
break;
case HBF_FAIL:
default:
for( cnt = 0; cnt < transfer->block_cnt; cnt++ ) {
int block_id = (cnt + transfer->start_id) % transfer->block_cnt;
hbf_block_t *block = transfer->block_arr[block_id];
if( block->state != HBF_NOT_TRANSFERED && block->state != HBF_TRANSFER_SUCCESS
&& block->http_error_msg != NULL) {
return block->http_error_msg;
}
}
re = "Unknown error.";
}
return re;
}
void hbf_set_abort_callback( hbf_transfer_t *transfer, hbf_abort_callback cb)
{
if( transfer ) {
transfer->abort_cb = cb;
}
}
void hbf_set_log_callback(hbf_transfer_t* transfer, hbf_log_callback cb)
{
if( transfer ) {
transfer->log_cb = cb;
}
}
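/* Illustration, not part of the original sources: minimal callbacks matching the
 * hbf_abort_callback and hbf_log_callback signatures declared in the header below.
 * The names my_abort_check and my_log are hypothetical, as is the userdata layout;
 * the client wires in its own handlers from the legacy propagator. The abort
 * callback presumably returns non-zero when the transfer should stop, leading to
 * HBF_USER_ABORTED. Assumes <stdio.h> for the diagnostic output. */
static int my_abort_check(void *userdata)
{
    const int *abort_requested = (const int *)userdata;
    return abort_requested && *abort_requested;
}

static void my_log(const char *context, const char *message, void *userdata)
{
    (void)userdata;
    fprintf(stderr, "httpbf %s: %s\n", context, message);
}

/* Typical wiring:
 *   hbf_set_abort_callback(transfer, my_abort_check);
 *   hbf_set_log_callback(transfer, my_log);
 */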

@ -1,142 +0,0 @@
/**
* http big file functions
*
* Copyright (c) 2012 by Klaas Freitag <freitag@owncloud.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _HBF_SEND_H
#define _HBF_SEND_H
#include "config_csync.h"
#ifdef NEON_WITH_LFS /* Switch on LFS in libneon. Never remove the NE_LFS! */
#define NE_LFS
#endif
#include <neon/ne_session.h>
#ifdef __cplusplus
extern "C" {
#endif
enum hbf_state_e {
HBF_SUCCESS,
HBF_NOT_TRANSFERED, /* never tried to transfer */
HBF_TRANSFER, /* transfer currently running */
HBF_TRANSFER_FAILED, /* transfer tried but failed */
HBF_TRANSFER_SUCCESS, /* block transfer succeeded. */
HBF_SPLITLIST_FAIL, /* the file could not be split */
HBF_SESSION_FAIL,
HBF_FILESTAT_FAIL,
HBF_PARAM_FAIL,
HBF_AUTH_FAIL,
HBF_PROXY_AUTH_FAIL,
HBF_CONNECT_FAIL,
HBF_TIMEOUT_FAIL,
HBF_MEMORY_FAIL,
HBF_STAT_FAIL,
HBF_SOURCE_FILE_CHANGE,
HBF_USER_ABORTED,
HBF_TRANSFER_NOT_ACKED,
HBF_FAIL
};
typedef enum hbf_state_e Hbf_State;
typedef struct hbf_block_s hbf_block_t;
struct hbf_block_s {
int seq_number;
int64_t start;
int64_t size;
Hbf_State state;
int http_result_code;
char *http_error_msg;
char *etag;
int tries;
};
typedef struct hbf_transfer_s hbf_transfer_t;
/* Callback to check whether the transfer should be aborted */
typedef int (*hbf_abort_callback) (void *);
typedef void (*hbf_log_callback) (const char *, const char *, void*);
typedef void (*hbf_chunk_finished_callback) (hbf_transfer_t*,int, void*);
struct hbf_transfer_s {
hbf_block_t **block_arr;
int block_cnt;
int fd;
int transfer_id;
char *url;
int start_id;
int status_code;
char *error_string;
int64_t stat_size;
time_t modtime;
time_t oc_header_modtime;
int64_t block_size;
int64_t threshold;
void *user_data;
hbf_abort_callback abort_cb;
hbf_log_callback log_cb;
hbf_chunk_finished_callback chunk_finished_cb;
int modtime_accepted;
const char *previous_etag; /* etag sent as the If-Match HTTP header */
char *file_id;
#ifndef NDEBUG
int64_t calc_size;
#endif
};
hbf_transfer_t *hbf_init_transfer( const char *dest_uri );
Hbf_State hbf_transfer( ne_session *session, hbf_transfer_t *transfer, const char *verb );
Hbf_State hbf_splitlist( hbf_transfer_t *transfer, int fd );
void hbf_free_transfer( hbf_transfer_t *transfer );
const char *hbf_error_string(hbf_transfer_t* transfer, Hbf_State state);
const char *hbf_transfer_etag( hbf_transfer_t *transfer );
const char *hbf_transfer_file_id( hbf_transfer_t *transfer );
void hbf_set_abort_callback( hbf_transfer_t *transfer, hbf_abort_callback cb);
void hbf_set_log_callback( hbf_transfer_t *transfer, hbf_log_callback cb);
/* Returns the HTTP (error) code of the transmission. If the transmission
* succeeded, the code is 200. If it failed, it is the error code of the
* first part whose transmission failed.
*/
int hbf_fail_http_code( hbf_transfer_t *transfer );
Hbf_State hbf_validate_source_file( hbf_transfer_t *transfer );
#ifdef __cplusplus
}
#endif
#endif
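/* Illustration, not part of the original sources: a rough sketch of how the API
 * declared above was driven, assuming an already configured ne_session, an open
 * file descriptor, and <stdio.h> for the diagnostics. Error handling is reduced to
 * the bare minimum; the helper name upload_with_httpbf is hypothetical and does not
 * exist in the code base. */
static int upload_with_httpbf(ne_session *session, const char *dest_uri, int fd)
{
    Hbf_State state;
    hbf_transfer_t *trans = hbf_init_transfer(dest_uri);
    if (!trans)
        return -1;

    state = hbf_splitlist(trans, fd);                 /* stat the file, build the chunk list */
    if (state == HBF_SUCCESS)
        state = hbf_transfer(session, trans, "PUT");  /* PUT all chunks */

    if (state == HBF_SUCCESS)
        fprintf(stderr, "uploaded, etag %s\n", hbf_transfer_etag(trans));
    else
        fprintf(stderr, "upload failed: %s (HTTP %d)\n",
                hbf_error_string(trans, state), hbf_fail_http_code(trans));

    hbf_free_transfer(trans);
    return state == HBF_SUCCESS ? 0 : -1;
}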

@ -1,18 +0,0 @@
project(hbf_test C )
add_definitions(-DUNIT_TESTING=1)
find_package(CMocka REQUIRED)
include_directories(
${CMAKE_BINARY_DIR}
${CMAKE_CURRENT_SOURCE_DIR}
${CMOCKA_INCLUDE_DIRS}
${NEON_INCLUDE_DIRS}
${HTTPBF_PUBLIC_INCLUDE_DIRS}
)
add_executable(send_test hbf_send_test.c)
target_link_libraries(send_test ${CMOCKA_LIBRARIES} ${NEON_LIBRARIES} ${HBF_LIBRARY} )

Binary file not shown (before: 1.2 MiB)
@ -1,162 +0,0 @@
/*
* httpbf - send big files via http
*
* Copyright (c) 2012 Klaas Freitag <freitag@owncloud.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#define _GNU_SOURCE /* See feature_test_macros(7) */
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stddef.h>
#include <setjmp.h>
#include <string.h>
#include <cmocka.h>
#include "config_csync.h"
#include <httpbf.h>
// A test case that does nothing and succeeds.
static void null_test_success(void **state) {
(void) state;
}
static char* test_file( const char* name ) {
if( ! name ) return 0;
char path[260];
strcpy( path, TESTFILEDIR);
if(path[strlen(TESTFILEDIR)-1] != '/')
strcat( path, "/");
strcat( path, name );
return strdup(path);
}
static void test_get_transfer_url( void **state ) {
const char *url = "http://example.org/owncloud";
const char *turl = NULL;
char res[256];
int i;
Hbf_State hbf_state;
hbf_transfer_t *list = NULL;
list = hbf_init_transfer( url );
assert_non_null( list );
/* open a file */
int fd = open( test_file("church.jpg"), O_RDONLY );
assert_true(fd >= 0);
hbf_state = hbf_splitlist(list, fd);
assert_true( hbf_state == HBF_SUCCESS);
for( i=0; i < list->block_cnt; i++ ) {
turl = get_transfer_url( list, i );
sprintf(res, "%s-chunking-%d-%d-%d", url, list->transfer_id,
list->block_cnt, i );
printf( "XX: %s\n", res );
assert_string_equal( turl, res );
}
}
static void test_hbf_init_transfer( void **state ) {
hbf_transfer_t *list = NULL;
const char *url = "http://example.org/owncloud";
list = hbf_init_transfer( url );
assert_non_null( list );
assert_string_equal( url, list->url );
}
/* Test with a file size that is not a multiple of the slice size. */
static void test_hbf_splitlist_odd( void **state ){
hbf_transfer_t *list = NULL;
const char *dest_url = "http://localhost/ocm/remote.php/webdav/big/church.jpg";
/* open a file */
int fd = open(test_file("church.jpg"), O_RDONLY);
assert_true(fd >= 0);
int prev_id = 0;
int i;
Hbf_State hbf_state;
/* do a smoke test for uniqueness */
for( i=0; i < 10000; i++) {
list = hbf_init_transfer(dest_url);
assert_non_null(list);
usleep(1);
hbf_state = hbf_splitlist(list, fd);
assert_int_not_equal(list->transfer_id, prev_id);
prev_id = list->transfer_id;
hbf_free_transfer(list);
}
list = hbf_init_transfer(dest_url);
assert_non_null(list);
hbf_state = hbf_splitlist(list, fd);
assert_non_null(list);
assert_int_equal(list->calc_size, list->stat_size);
assert_int_not_equal(list->block_cnt, 0);
assert_true( hbf_state == HBF_SUCCESS);
/* checks on the block list */
int seen_zero_seq = 0;
int prev_seq = -1;
int64_t prev_block_end = -1;
for( i=0; i < list->block_cnt; i++) {
hbf_block_t *blk = list->block_arr[i];
assert_non_null(blk);
if( blk->seq_number == 0 ) seen_zero_seq++;
assert_int_equal(prev_seq, blk->seq_number -1 );
prev_seq = blk->seq_number;
assert_true((prev_block_end+1) == (blk->start));
prev_block_end = blk->start + blk->size;
}
/* Make sure we saw blk->seq_number == 0 exactly once */
assert_int_equal( seen_zero_seq, 1 );
hbf_free_transfer( list );
}
int main(void) {
const UnitTest tests[] = {
unit_test(null_test_success),
unit_test(test_hbf_splitlist_odd),
unit_test(test_hbf_init_transfer),
unit_test(test_get_transfer_url)
};
return run_tests(tests);
}

@ -36,9 +36,6 @@
#define CSYNC_LOG_CATEGORY_NAME "csync.vio.main"
#include "csync_log.h"
#if USE_NEON
#include "csync_owncloud.h"
#endif
csync_vio_handle_t *csync_vio_opendir(CSYNC *ctx, const char *name) {
switch(ctx->replica) {
@ -132,8 +129,5 @@ char *csync_vio_get_status_string(CSYNC *ctx) {
if(ctx->error_string) {
return ctx->error_string;
}
#ifdef USE_NEON
return owncloud_error_string(ctx);
#endif
return 0;
}

@ -7,7 +7,6 @@ include_directories(
${CSTDLIB_PUBLIC_INCLUDE_DIRS}
${CMAKE_BINARY_DIR}
${CMOCKA_INCLUDE_DIR}
${HTTPBF_PUBLIC_INCLUDE_DIRS}
)
include_directories(${CHECK_INCLUDE_DIRS})
@ -54,10 +53,6 @@ add_cmocka_test(check_csync_update csync_tests/check_csync_update.c ${TEST_TARGE
# encoding
add_cmocka_test(check_encoding_functions encoding_tests/check_encoding.c ${TEST_TARGET_LIBRARIES})
# httpbf
set(TEST_HTTPBF_LIBRARIES ${TEST_TARGET_LIBRARIES} ${NEON_LIBRARIES})
add_cmocka_test(check_httpbf httpbf_tests/hbf_send_test.c ${TEST_HTTPBF_LIBRARIES} )
if(UNIT_TESTING)
INSTALL( FILES "${CMOCKA_LIBRARIES}" DESTINATION ${CMAKE_INSTALL_LIBDIR})
endif(UNIT_TESTING)

@ -1,241 +0,0 @@
/*
* libcsync -- a library to sync a directory with another
*
* Copyright (c) 2013 by Klaas Freitag <freitag@owncloud.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdarg.h>
#include <stddef.h>
#include <setjmp.h>
#include <string.h>
#include <unistd.h>
#include <cmocka.h>
#include "config_test.h"
#if USE_NEON
#include "httpbf.c"
#endif
// A test case that does nothing and succeeds.
static void null_test_success(void **state) {
(void) state;
}
#if USE_NEON
static char* test_file( const char* name ) {
char path[260];
if( ! name ) return 0;
strcpy( path, TESTFILES_DIR);
if(path[strlen(TESTFILES_DIR)-1] != '/')
strcat( path, "/");
strcat( path, name );
return strdup(path);
}
static void test_get_transfer_url( void **state ) {
const char *url = "http://example.org/owncloud";
const char *turl = NULL;
int fd;
Hbf_State hbf_state;
hbf_transfer_t *list = NULL;
(void) state;
list = hbf_init_transfer( url );
assert_non_null( list );
/* open a file */
fd = open( test_file("church.jpg"), O_RDONLY );
assert_true(fd >= 0);
hbf_state = hbf_splitlist(list, fd);
assert_true( hbf_state == HBF_SUCCESS);
assert_true( list->block_cnt == 1);
turl = get_transfer_url( list, 0 );
assert_non_null( turl );
assert_string_equal( url, turl );
hbf_free_transfer( list );
}
static void test_get_transfer_url_bigfile( void **state ) {
const char *url = "http://example.org/big_file";
const char *turl = NULL;
char res[256];
int i, fd;
Hbf_State hbf_state;
hbf_transfer_t *list = NULL;
(void) state;
list = hbf_init_transfer( url );
assert_non_null( list );
list->threshold = list->block_size = (1024*1024); /* block size 1 MB */
/* open a file */
fd = open( test_file("church.jpg"), O_RDONLY );
assert_true(fd >= 0);
hbf_state = hbf_splitlist(list, fd);
assert_true( hbf_state == HBF_SUCCESS);
assert_true( list->block_cnt == 2 );
for( i=0; i < list->block_cnt; i++ ) {
turl = get_transfer_url( list, i );
assert_non_null(turl);
sprintf(res, "%s-chunking-%u-%u-%u", url, list->transfer_id,
list->block_cnt, i );
/* printf( "XX: %s\n", res ); */
assert_string_equal( turl, res );
}
hbf_free_transfer(list);
}
static void test_hbf_init_transfer( void **state ) {
hbf_transfer_t *list = NULL;
const char *url = "http://example.org/owncloud";
(void) state;
list = hbf_init_transfer( url );
assert_non_null( list );
assert_string_equal( url, list->url );
}
/* Test with a file size that is not a multiple of the slice size. */
static void test_hbf_splitlist_odd( void **state ){
hbf_transfer_t *list = NULL;
const char *dest_url = "http://localhost/ocm/remote.php/webdav/big/church.jpg";
int prev_id = 0;
int i, fd;
Hbf_State hbf_state;
(void) state;
/* open a file */
fd = open(test_file("church.jpg"), O_RDONLY);
assert_true(fd >= 0);
/* do a smoke test for uniqueness */
for( i=0; i < 10000; i++) {
list = hbf_init_transfer(dest_url);
assert_non_null(list);
usleep(1);
hbf_state = hbf_splitlist(list, fd);
assert_int_not_equal(list->transfer_id, prev_id);
prev_id = list->transfer_id;
hbf_free_transfer(list);
}
list = hbf_init_transfer(dest_url);
assert_non_null(list);
hbf_state = hbf_splitlist(list, fd);
assert_non_null(list);
#ifndef NDEBUG
assert_int_equal(list->calc_size, list->stat_size);
#endif
assert_int_not_equal(list->block_cnt, 0);
assert_true( hbf_state == HBF_SUCCESS);
/* checks on the block list */
if( 1 ) {
int seen_zero_seq = 0;
int prev_seq = -1;
int64_t prev_block_end = -1;
for( i=0; i < list->block_cnt; i++) {
hbf_block_t *blk = list->block_arr[i];
assert_non_null(blk);
if( blk->seq_number == 0 ) seen_zero_seq++;
assert_int_equal(prev_seq, blk->seq_number -1 );
prev_seq = blk->seq_number;
assert_true((prev_block_end+1) == (blk->start));
prev_block_end = blk->start + blk->size;
}
/* Make sure we saw blk->seq_number == 0 exactly once */
assert_int_equal( seen_zero_seq, 1 );
}
hbf_free_transfer( list );
}
/* Test with a file of size zero. */
static void test_hbf_splitlist_zero( void **state ){
hbf_transfer_t *list = NULL;
const char *dest_url = "http://localhost/ocm/remote.php/webdav/big/zerofile.txt";
int fd;
Hbf_State hbf_state;
(void) state;
/* open a file */
fd = open(test_file("zerofile.txt"), O_RDONLY);
assert_true(fd >= 0);
list = hbf_init_transfer(dest_url);
assert_non_null(list);
hbf_state = hbf_splitlist(list, fd);
assert_non_null(list);
assert_int_equal(list->stat_size, 0);
#ifndef NDEBUG
assert_int_equal(list->calc_size, list->stat_size);
#endif
assert_int_equal(list->block_cnt, 1);
assert_true( hbf_state == HBF_SUCCESS);
hbf_free_transfer( list );
}
#endif
int main(void) {
const UnitTest tests[] = {
unit_test(null_test_success),
#if USE_NEON
unit_test(test_hbf_splitlist_odd),
unit_test(test_hbf_splitlist_zero),
unit_test(test_hbf_init_transfer),
unit_test(test_get_transfer_url),
unit_test(test_get_transfer_url_bigfile)
#endif
};
return run_tests(tests);
}

@ -379,15 +379,11 @@ restart_sync:
// ignore hidden files or not
_csync_ctx->ignore_hidden_files = options.ignoreHiddenFiles;
csync_set_module_property(_csync_ctx, "csync_context", _csync_ctx);
if( !options.proxy.isNull() ) {
QString host;
int port = 0;
bool ok;
// Set as default and let overwrite later
csync_set_module_property(_csync_ctx, "proxy_type", (void*) "NoProxy");
QStringList pList = options.proxy.split(':');
if(pList.count() == 3) {
// http: //192.168.178.23 : 8080
@ -397,13 +393,6 @@ restart_sync:
port = pList.at(2).toInt(&ok);
if( !host.isNull() ) {
csync_set_module_property(_csync_ctx, "proxy_type", (void*) "HttpProxy");
csync_set_module_property(_csync_ctx, "proxy_host", host.toUtf8().data());
if( ok && port ) {
csync_set_module_property(_csync_ctx, "proxy_port", (void*) &port);
}
}
QNetworkProxyFactory::setUseSystemConfiguration(false);
QNetworkProxy::setApplicationProxy(QNetworkProxy(QNetworkProxy::HttpProxy, host, port));
}
@ -414,7 +403,6 @@ restart_sync:
url.remove(0, 8);
url = QString("http%1").arg(url);
}
clientProxy.setCSyncProxy(QUrl(url), _csync_ctx);
}
// Exclude lists

@ -76,7 +76,7 @@ void ShibbolethCredentials::setAccount(Account* account)
void ShibbolethCredentials::syncContextPreInit(CSYNC* ctx)
{
csync_set_auth_callback (ctx, handleNeonSSLProblems);
Q_UNUSED(ctx);
}
QByteArray ShibbolethCredentials::prepareCookieData() const
@ -93,7 +93,7 @@ QByteArray ShibbolethCredentials::prepareCookieData() const
void ShibbolethCredentials::syncContextPreStart (CSYNC* ctx)
{
csync_set_module_property(ctx, "session_key", prepareCookieData().data());
Q_UNUSED(ctx);
}
bool ShibbolethCredentials::changed(AbstractCredentials* credentials) const
@ -207,8 +207,6 @@ void ShibbolethCredentials::invalidateToken()
jar->clearSessionCookies();
removeShibCookie();
_shibCookie = QNetworkCookie();
// ### access to ctx missing, but might not be required at all
//csync_set_module_property(ctx, "session_key", "");
}
void ShibbolethCredentials::onShibbolethCookieReceived(const QNetworkCookie& shibCookie)

@ -839,9 +839,7 @@ void Folder::startSync(const QStringList &pathList)
QMetaObject::invokeMethod(this, "slotSyncFinished", Qt::QueuedConnection);
return;
}
_clientProxy.setCSyncProxy(_accountState->account()->url(), _csync_ctx);
} else if (proxyDirty()) {
_clientProxy.setCSyncProxy(_accountState->account()->url(), _csync_ctx);
setProxyDirty(false);
}

@ -117,6 +117,21 @@ void NetworkSettings::loadProxySettings()
void NetworkSettings::loadBWLimitSettings()
{
#if QT_VERSION < QT_VERSION_CHECK(5,3,3)
// QNAM bandwidth limiting only works with versions of Qt greater than or equal to 5.3.3
// (It needs Qt commits 097b641 and b99fa32)
_ui->noDownloadLimitRadioButton->setChecked(true);
_ui->downloadLimitRadioButton->setEnabled(false);
_ui->noDownloadLimitRadioButton->setEnabled(false);
_ui->autoDownloadLimitRadioButton->setEnabled(false);
_ui->noUploadLimitRadioButton->setChecked(true);
_ui->uploadLimitRadioButton->setEnabled(false);
_ui->noUploadLimitRadioButton->setEnabled(false);
_ui->autoUploadLimitRadioButton->setEnabled(false);
return;
#endif
ConfigFile cfgFile;
int useDownloadLimit = cfgFile.useDownloadLimit();

@ -7,7 +7,6 @@ configure_file( version.h.in "${CMAKE_CURRENT_BINARY_DIR}/version.h" )
include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
# csync is required.
include_directories(${CMAKE_SOURCE_DIR}/csync/src
${CMAKE_SOURCE_DIR}/csync/src/httpbf/src
${CMAKE_BINARY_DIR}/csync
${CMAKE_BINARY_DIR}/csync/src
)
@ -73,13 +72,6 @@ set(libsync_SRCS
../3rdparty/certificates/p12topem.cpp
)
if(USE_NEON)
list(APPEND libsync_SRCS
propagator_legacy.cpp
)
add_definitions(-DUSE_NEON)
endif(USE_NEON)
# These headers are installed for libowncloudsync to be used by 3rd party apps
set(owncloudsync_HEADERS
account.h
@ -125,15 +117,6 @@ if(INOTIFY_FOUND)
link_directories(${INOTIFY_LIBRARY_DIR})
endif()
if(NEON_FOUND)
list(APPEND libsync_LINK_TARGETS ${NEON_LIBRARIES} httpbf)
include_directories(${NEON_INCLUDE_DIRS})
if(NEON_WITH_LFS)
add_definitions(-DNE_LFS)
endif()
endif()
if(ZLIB_FOUND)
list(APPEND libsync_LINK_TARGETS ${ZLIB_LIBRARIES})
include_directories(${ZLIB_INCLUDE_DIRS})

@ -115,39 +115,6 @@ const char* ClientProxy::proxyTypeToCStr(QNetworkProxy::ProxyType type)
}
}
void ClientProxy::setCSyncProxy( const QUrl& url, CSYNC *csync_ctx )
{
#ifdef USE_NEON
/* Store proxy */
QList<QNetworkProxy> proxies = QNetworkProxyFactory::proxyForQuery(QNetworkProxyQuery(url));
// We set at least one in Application
Q_ASSERT(proxies.count() > 0);
if (proxies.count() == 0) {
qDebug() << Q_FUNC_INFO << "No proxy!";
return;
}
QNetworkProxy proxy = proxies.first();
if (proxy.type() == QNetworkProxy::NoProxy) {
qDebug() << "Passing NO proxy to csync for" << url.toString();
} else {
qDebug() << "Passing" << proxy.hostName() << "of proxy type " << proxy.type()
<< " to csync for" << url.toString();
}
csync_set_module_property( csync_ctx, "proxy_type", (void*)(proxyTypeToCStr(proxy.type())));
csync_set_module_property( csync_ctx, "proxy_host", proxy.hostName().toUtf8().data());
int proxy_port = proxy.port();
csync_set_module_property( csync_ctx, "proxy_port", &proxy_port );
csync_set_module_property( csync_ctx, "proxy_user", proxy.user().toUtf8().data());
csync_set_module_property( csync_ctx, "proxy_pwd", proxy.password().toUtf8().data());
#else
Q_UNUSED(url);
Q_UNUSED(csync_ctx);
#endif
}
void ClientProxy::lookupSystemProxyAsync(const QUrl &url, QObject *dst, const char *slot)
{
SystemProxyRunnable *runnable = new SystemProxyRunnable(url);

@ -40,7 +40,6 @@ public:
static void lookupSystemProxyAsync(const QUrl &url, QObject *dst, const char *slot);
public slots:
void setCSyncProxy( const QUrl& url, CSYNC *csync_ctx );
void setupQtProxyFromConfig();
private:

@ -30,52 +30,5 @@
namespace OCC
{
int handleNeonSSLProblems(const char* prompt,
char* buf,
size_t /*len*/,
int /*echo*/,
int /*verify*/,
void* userdata)
{
int re = 0;
const QString qPrompt = QString::fromLatin1( prompt ).trimmed();
SyncEngine* engine = reinterpret_cast<SyncEngine*>(userdata);
if( qPrompt.startsWith( QLatin1String("There are problems with the SSL certificate:"))) {
// SSL is requested. If the program gets here, the SSL check was already done by Qt.
// It needs to be checked whether the chain is still equal to the one that
// was verified by the user.
const QRegExp regexp("fingerprint: ([\\w\\d:]+)");
bool certOk = false;
int pos = 0;
// This is the set of certificates which QNAM accepted, so we should accept
// them as well
QList<QSslCertificate> certs = engine->account()->sslConfiguration().peerCertificateChain();
while (!certOk && (pos = regexp.indexIn(qPrompt, 1+pos)) != -1) {
QString neon_fingerprint = regexp.cap(1);
foreach( const QSslCertificate& c, certs ) {
QString verified_shasum = Utility::formatFingerprint(c.digest(QCryptographicHash::Sha1).toHex());
qDebug() << "SSL Fingerprint from neon: " << neon_fingerprint << " compared to verified: " << verified_shasum;
if( verified_shasum == neon_fingerprint ) {
certOk = true;
break;
}
}
}
// certOk = false; DEBUG setting, keep disabled!
if( !certOk ) { // Problem!
qstrcpy( buf, "no" );
re = -1;
} else {
qstrcpy( buf, "yes" ); // Certificate is fine!
}
} else {
qDebug() << "Unknown prompt: <" << prompt << ">";
re = -1;
}
return re;
}
} // namespace OCC

@ -21,13 +21,6 @@
namespace OCC
{
OWNCLOUDSYNC_EXPORT int handleNeonSSLProblems(const char* prompt,
char* buf,
size_t len,
int echo,
int verify,
void* userdata);
} // namespace OCC
#endif

@ -35,44 +35,6 @@ using namespace QKeychain;
namespace OCC
{
int getauth(const char *prompt,
char *buf,
size_t len,
int echo,
int verify,
void *userdata)
{
int re = 0;
// ### safe? Not really. If the wizard is run in the main thread, the account could change during the sync.
SyncEngine* engine = reinterpret_cast<SyncEngine*>(userdata);
HttpCredentials* http_credentials = qobject_cast<HttpCredentials*>(engine->account()->credentials());
if (!http_credentials) {
qDebug() << "Not a HTTP creds instance!";
return -1;
}
QString qPrompt = QString::fromLatin1( prompt ).trimmed();
QString user = http_credentials->user();
QString pwd = http_credentials->password();
if( qPrompt == QLatin1String("Enter your username:") ) {
// qDebug() << "OOO Username requested!";
qstrncpy( buf, user.toUtf8().constData(), len );
} else if( qPrompt == QLatin1String("Enter your password:") ) {
// qDebug() << "OOO Password requested!";
qstrncpy( buf, pwd.toUtf8().constData(), len );
} else {
if( http_credentials->sslIsTrusted() ) {
qstrcpy( buf, "yes" ); // Certificate is fine!
} else {
re = handleNeonSSLProblems(prompt, buf, len, echo, verify, userdata);
}
}
return re;
}
namespace
{
const char userC[] = "user";
@ -97,31 +59,12 @@ HttpCredentials::HttpCredentials(const QString& user, const QString& password, c
void HttpCredentials::syncContextPreInit (CSYNC* ctx)
{
csync_set_auth_callback (ctx, getauth);
// create a SSL client certificate configuration in CSYNC* ctx
struct csync_client_certs_s clientCerts;
clientCerts.certificatePath = strdup(_certificatePath.toStdString().c_str());
clientCerts.certificatePasswd = strdup(_certificatePasswd.toStdString().c_str());
csync_set_module_property(ctx, "SSLClientCerts", &clientCerts);
free(clientCerts.certificatePath);
free(clientCerts.certificatePasswd);
Q_UNUSED(ctx);
}
void HttpCredentials::syncContextPreStart (CSYNC* ctx)
{
QList<QNetworkCookie> cookies(_account->lastAuthCookies());
QString cookiesAsString;
// Stuff cookies inside csync, then we can avoid the intermediate HTTP 401 reply
// when https://github.com/owncloud/core/pull/4042 is merged.
foreach(QNetworkCookie c, cookies) {
cookiesAsString += c.name();
cookiesAsString += '=';
cookiesAsString += c.value();
cookiesAsString += "; ";
}
csync_set_module_property(ctx, "session_key", cookiesAsString.toLatin1().data());
Q_UNUSED(ctx);
}
bool HttpCredentials::changed(AbstractCredentials* credentials) const

@ -33,41 +33,6 @@ namespace OCC
namespace
{
int getauth(const char *prompt,
char *buf,
size_t len,
int echo,
int verify,
void *userdata)
{
int re = 0;
QMutex mutex;
// ### safe?
TokenCredentials* http_credentials = qobject_cast<TokenCredentials*>(AccountManager::instance()->account()->credentials());
if (!http_credentials) {
qDebug() << "Not a HTTP creds instance!";
return -1;
}
QString qPrompt = QString::fromLatin1( prompt ).trimmed();
QString user = http_credentials->user();
QString pwd = http_credentials->password();
if( qPrompt == QLatin1String("Enter your username:") ) {
// qDebug() << "OOO Username requested!";
QMutexLocker locker( &mutex );
qstrncpy( buf, user.toUtf8().constData(), len );
} else if( qPrompt == QLatin1String("Enter your password:") ) {
QMutexLocker locker( &mutex );
// qDebug() << "OOO Password requested!";
qstrncpy( buf, pwd.toUtf8().constData(), len );
} else {
re = handleNeonSSLProblems(prompt, buf, len, echo, verify, userdata);
}
return re;
}
const char authenticationFailedC[] = "owncloud-authentication-failed";
} // ns
@ -117,12 +82,12 @@ TokenCredentials::TokenCredentials(const QString& user, const QString& password,
void TokenCredentials::syncContextPreInit (CSYNC* ctx)
{
csync_set_auth_callback (ctx, getauth);
Q_UNUSED(ctx);
}
void TokenCredentials::syncContextPreStart (CSYNC* ctx)
{
csync_set_module_property(ctx, "session_key", _token.toUtf8().data());
Q_UNUSED(ctx);
}
bool TokenCredentials::changed(AbstractCredentials* credentials) const

@ -22,9 +22,6 @@
#include "propagateremotemove.h"
#include "propagateremotemkdir.h"
#include "propagatorjobs.h"
#ifdef USE_NEON
#include "propagator_legacy.h"
#endif
#include "configfile.h"
#include "utility.h"
#include "account.h"
@ -193,11 +190,7 @@ bool PropagateItemJob::checkForProblemsWithShared(int httpStatusCode, const QStr
downloadItem->_instruction = CSYNC_INSTRUCTION_SYNC;
}
downloadItem->_direction = SyncFileItem::Down;
#ifdef USE_NEON
newJob = new PropagateDownloadFileLegacy(_propagator, downloadItem);
#else
newJob = new PropagateDownloadFileQNAM(_propagator, downloadItem);
#endif
} else {
// Directories are harder to recover.
// But just re-create the directory, next sync will be able to recover the files
@ -256,15 +249,6 @@ PropagateItemJob* OwncloudPropagator::createJob(const SyncFileItemPtr &item) {
// Should we set the mtime?
return 0;
}
#ifdef USE_NEON
if (useLegacyJobs()) {
if (item->_direction != SyncFileItem::Up) {
return new PropagateDownloadFileLegacy(this, item);
} else {
return new PropagateUploadFileLegacy(this, item);
}
} else
#endif
{
if (item->_direction != SyncFileItem::Up) {
return new PropagateDownloadFileQNAM(this, item);
@ -386,7 +370,7 @@ void OwncloudPropagator::start(const SyncFileItemVector& items)
connect(_rootJob.data(), SIGNAL(finished(SyncFileItem::Status)), this, SLOT(emitFinished()));
connect(_rootJob.data(), SIGNAL(ready()), this, SLOT(scheduleNextJob()), Qt::QueuedConnection);
qDebug() << (useLegacyJobs() ? "Using legacy libneon/HTTP sequential code path" : "Using QNAM/HTTP parallel code path");
qDebug() << "Using QNAM/HTTP parallel code path";
QTimer::singleShot(0, this, SLOT(scheduleNextJob()));
}
@ -406,51 +390,6 @@ bool OwncloudPropagator::isInSharedDirectory(const QString& file)
return re;
}
/**
* Return true if we should use the legacy jobs.
* Some features are not supported by QNAM, so we still fall back to the legacy jobs
* in those cases.
*/
bool OwncloudPropagator::useLegacyJobs()
{
#ifdef USE_NEON
// Allow an environment variable for debugging
QByteArray env = qgetenv("OWNCLOUD_USE_LEGACY_JOBS");
if (env=="true" || env =="1") {
qDebug() << "Force Legacy Propagator ACTIVATED";
return true;
}
if (_downloadLimit.fetchAndAddAcquire(0) != 0 || _uploadLimit.fetchAndAddAcquire(0) != 0) {
// QNAM bandwidth limiting only works with versions of Qt greater than or equal to 5.3.3
// (It needs Qt commits 097b641 and b99fa32)
#if QT_VERSION >= QT_VERSION_CHECK(5,3,3)
return false;
#elif QT_VERSION >= QT_VERSION_CHECK(5,0,0)
env = qgetenv("OWNCLOUD_NEW_BANDWIDTH_LIMITING");
if (env=="true" || env =="1") {
qDebug() << "New Bandwidth Limiting Code ACTIVATED";
return false;
}
// Do a runtime check.
// (Poor man's version comparison)
const char *v = qVersion(); // "x.y.z";
if (QLatin1String(v) >= QLatin1String("5.3.3")) {
return false;
} else {
qDebug() << "Use legacy jobs because qt version is only" << v << "while 5.3.3 is needed";
return true;
}
#else
qDebug() << "Use legacy jobs because of Qt4";
return true;
#endif
}
#endif // USE_NEON
return false;
}
int OwncloudPropagator::httpTimeout()
{
static int timeout;

@ -30,11 +30,6 @@
#include "bandwidthmanager.h"
#include "accountfwd.h"
struct hbf_transfer_s;
struct ne_session_s;
struct ne_decompress_s;
typedef struct ne_prop_result_set_s ne_prop_result_set;
namespace OCC {
/** Free disk space threshold below which syncs will abort and not even start.
@ -127,7 +122,6 @@ signals:
/*
* Abstract class to propagate a single item
* (Only used for neon job)
*/
class PropagateItemJob : public PropagatorJob {
Q_OBJECT
@ -260,14 +254,8 @@ class OwncloudPropagator : public QObject {
PropagateItemJob *createJob(const SyncFileItemPtr& item);
QScopedPointer<PropagateDirectory> _rootJob;
bool useLegacyJobs();
public:
/* 'const' because they are accessed by the thread */
QThread* _neonThread;
ne_session_s * const _session;
const QString _localDir; // absolute path to the local directory. ends with '/'
const QString _remoteDir; // path to the root of the remote. ends with '/' (include WebDAV path)
const QString _remoteFolder; // folder. (same as remoteDir but without the WebDAV path)
@ -277,12 +265,10 @@ public:
public:
OwncloudPropagator(AccountPtr account, ne_session_s *session, const QString &localDir,
OwncloudPropagator(AccountPtr account, const QString &localDir,
const QString &remoteDir, const QString &remoteFolder,
SyncJournalDb *progressDb, QThread *neonThread)
: _neonThread(neonThread)
, _session(session)
, _localDir((localDir.endsWith(QChar('/'))) ? localDir : localDir+'/' )
SyncJournalDb *progressDb)
: _localDir((localDir.endsWith(QChar('/'))) ? localDir : localDir+'/' )
, _remoteDir((remoteDir.endsWith(QChar('/'))) ? remoteDir : remoteDir+'/' )
, _remoteFolder((remoteFolder.endsWith(QChar('/'))) ? remoteFolder : remoteFolder+'/' )
, _journal(progressDb)

@ -31,10 +31,6 @@
#include <cmath>
#include <cstring>
#ifdef USE_NEON
#include "propagator_legacy.h"
#endif
#if QT_VERSION < QT_VERSION_CHECK(5, 4, 2)
namespace {
const char owncloudShouldSoftCancelPropertyName[] = "owncloud-should-soft-cancel";
@ -691,15 +687,8 @@ void PropagateUploadFileQNAM::slotPutFinished()
// X-OC-MTime is supported since owncloud 5.0. But not when chunking.
// Normally Owncloud 6 always puts X-OC-MTime
qWarning() << "Server does not support X-OC-MTime" << job->reply()->rawHeader("X-OC-MTime");
#ifdef USE_NEON
PropagatorJob *newJob = new UpdateMTimeAndETagJob(_propagator, _item);
QObject::connect(newJob, SIGNAL(itemCompleted(SyncFileItem, PropagatorJob)),
this, SLOT(finalize(SyncFileItem)));
QMetaObject::invokeMethod(newJob, "start");
return;
#else
// Well, the mtime was not set
#endif
done(SyncFileItem::SoftError, "Server does not support X-OC-MTime");
}
// performance logging

@ -1,735 +0,0 @@
/*
* Copyright (C) by Olivier Goffart <ogoffart@owncloud.com>
* Copyright (C) by Klaas Freitag <freitag@owncloud.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#include "propagator_legacy.h"
#include "owncloudpropagator_p.h"
#include "utility.h"
#include "syncjournaldb.h"
#include "syncjournalfilerecord.h"
#include "filesystem.h"
#include <httpbf.h>
#include <qfile.h>
#include <qdir.h>
#include <qdiriterator.h>
#include <qtemporaryfile.h>
#include <QDebug>
#include <QDateTime>
#include <qstack.h>
#include <QCoreApplication>
#include <neon/ne_basic.h>
#include <neon/ne_socket.h>
#include <neon/ne_session.h>
#include <neon/ne_props.h>
#include <neon/ne_auth.h>
#include <neon/ne_dates.h>
#include <neon/ne_compress.h>
#include <neon/ne_redirect.h>
#include <time.h>
namespace OCC {
static QByteArray get_etag_from_reply(ne_request *req)
{
QByteArray ret = parseEtag(ne_get_response_header(req, "OC-ETag"));
if (ret.isEmpty()) {
ret = parseEtag(ne_get_response_header(req, "ETag"));
}
if (ret.isEmpty()) {
ret = parseEtag(ne_get_response_header(req, "etag"));
}
return ret;
}
bool PropagateNeonJob::updateErrorFromSession(int neon_code, ne_request* req, int ignoreHttpCode)
{
if( neon_code != NE_OK ) {
qDebug("Neon error code was %d", neon_code);
}
QString errorString;
int httpStatusCode = 0;
switch(neon_code) {
case NE_OK: /* Success, but still the possibility of problems */
if( req ) {
const ne_status *status = ne_get_status(req);
if (status) {
if ( status->klass == 2 || status->code == ignoreHttpCode) {
// Everything is ok, no error.
return false;
}
errorString = QString::fromUtf8( status->reason_phrase );
httpStatusCode = status->code;
_item->_httpErrorCode = httpStatusCode;
}
} else {
errorString = QString::fromUtf8(ne_get_error(_propagator->_session));
httpStatusCode = errorString.mid(0, errorString.indexOf(QChar(' '))).toInt();
_item->_httpErrorCode = httpStatusCode;
if ((httpStatusCode >= 200 && httpStatusCode < 300)
|| (httpStatusCode != 0 && httpStatusCode == ignoreHttpCode)) {
// No error
return false;
}
}
// FIXME: classify the error
done (SyncFileItem::NormalError, errorString);
return true;
case NE_ERROR: /* Generic error; use ne_get_error(session) for message */
errorString = QString::fromUtf8(ne_get_error(_propagator->_session));
// Check if we don't need to ignore that error.
httpStatusCode = errorString.mid(0, errorString.indexOf(QChar(' '))).toInt();
_item->_httpErrorCode = httpStatusCode;
qDebug() << Q_FUNC_INFO << "NE_ERROR" << errorString << httpStatusCode << ignoreHttpCode;
if (ignoreHttpCode && httpStatusCode == ignoreHttpCode)
return false;
done(SyncFileItem::NormalError, errorString);
return true;
case NE_LOOKUP: /* Server or proxy hostname lookup failed */
case NE_AUTH: /* User authentication failed on server */
case NE_PROXYAUTH: /* User authentication failed on proxy */
case NE_CONNECT: /* Could not connect to server */
case NE_TIMEOUT: /* Connection timed out */
done(SyncFileItem::FatalError, QString::fromUtf8(ne_get_error(_propagator->_session)));
return true;
case NE_FAILED: /* The precondition failed */
case NE_RETRY: /* Retry request (ne_end_request ONLY) */
case NE_REDIRECT: /* See ne_redirect.h */
default:
done(SyncFileItem::SoftError, QString::fromUtf8(ne_get_error(_propagator->_session)));
return true;
}
return false;
}
void UpdateMTimeAndETagJob::start()
{
QScopedPointer<char, QScopedPointerPodDeleter> uri(
ne_path_escape((_propagator->_remoteDir + _item->_file).toUtf8()));
if (!updateMTimeAndETag(uri.data(), _item->_modtime))
return;
done(SyncFileItem::Success);
}
void PropagateUploadFileLegacy::start()
{
if (_propagator->_abortRequested.fetchAndAddRelaxed(0))
return;
QFile file(_propagator->getFilePath(_item->_file));
if (!file.open(QIODevice::ReadOnly)) {
done(SyncFileItem::NormalError, file.errorString());
return;
}
QScopedPointer<char, QScopedPointerPodDeleter> uri(
ne_path_escape((_propagator->_remoteDir + _item->_file).toUtf8()));
int attempts = 0;
/*
* Try several times to upload the file in chunks. Check the file size and mtime
* before submitting a chunk and after having submitted the last one.
* If the file has changed, retry.
*/
qDebug() << "** PUT request to" << uri.data();
const SyncJournalDb::UploadInfo progressInfo = _propagator->_journal->getUploadInfo(_item->_file);
do {
Hbf_State state = HBF_SUCCESS;
QScopedPointer<hbf_transfer_t, ScopedPointerHelpers> trans(hbf_init_transfer(uri.data()));
Q_ASSERT(trans);
trans->user_data = this;
hbf_set_log_callback(trans.data(), _log_callback);
hbf_set_abort_callback(trans.data(), _user_want_abort);
trans.data()->chunk_finished_cb = chunk_finished_cb;
static uint chunkSize = qgetenv("OWNCLOUD_CHUNK_SIZE").toUInt();
if (chunkSize > 0) {
trans->block_size = trans->threshold = chunkSize;
}
state = hbf_splitlist(trans.data(), file.handle());
// This is the modtime hbf will announce to the server.
// We don't trust the modtime hbf computes itself via _fstat64
// on windows - hbf may only use it to detect file changes during
// upload.
trans->oc_header_modtime = FileSystem::getModTime(file.fileName());
// If the source file has changed during upload, it is detected and the
// variable _previousFileSize is set accordingly. The propagator waits a
// couple of seconds and retries.
if(_previousFileSize > 0) {
qDebug() << "File size changed underway: " << trans->stat_size - _previousFileSize;
// Report the change of the overall transmission size to the propagator (queued connection because we are in a thread)
QMetaObject::invokeMethod(_propagator, "adjustTotalTransmissionSize", Qt::QueuedConnection,
Q_ARG(qint64, trans->stat_size - _previousFileSize));
// update the item's values to the current from trans. hbf_splitlist does a stat
_item->_size = trans->stat_size;
_item->_modtime = trans->oc_header_modtime;
}
emit progress(*_item, 0);
if (progressInfo._valid) {
if (Utility::qDateTimeToTime_t(progressInfo._modtime) == _item->_modtime) {
trans->start_id = progressInfo._chunk;
trans->transfer_id = progressInfo._transferid;
}
}
ne_set_notifier(_propagator->_session, notify_status_cb, this);
_lastTime.restart();
_lastProgress = 0;
_chunked_done = 0;
_chunked_total_size = _item->_size;
if( state == HBF_SUCCESS ) {
QByteArray previousEtag;
if (!_item->_etag.isEmpty() && _item->_etag != "empty_etag") {
// We add quotes because the owncloud server always adds quotes around the etag, and
// csync_owncloud.c's owncloud_file_id always strips the quotes.
previousEtag = '"' + _item->_etag + '"';
trans->previous_etag = previousEtag.data();
}
_chunked_total_size = trans->stat_size;
qDebug() << "About to upload " << _item->_file << " (" << previousEtag << _item->_size << " bytes )";
/* Transfer all the chunks through the HTTP session using PUT. */
state = hbf_transfer( _propagator->_session, trans.data(), "PUT" );
}
// The file id should only be empty for newly uploaded or downloaded files.
QByteArray fid = hbf_transfer_file_id( trans.data() );
if( !fid.isEmpty() ) {
if( !_item->_fileId.isEmpty() && _item->_fileId != fid ) {
qDebug() << "WARN: File ID changed!" << _item->_fileId << fid;
}
_item->_fileId = fid;
}
/* Handle errors. */
if ( state != HBF_SUCCESS ) {
/* If the source file changed during submission, lets try again */
if( state == HBF_SOURCE_FILE_CHANGE ) {
if( attempts++ < 5 ) { /* FIXME: How often do we want to try? */
qDebug("SOURCE file has changed during upload, retry #%d in %d seconds!", attempts, 2*attempts);
Utility::sleep(2*attempts);
if( _previousFileSize == 0 ) {
_previousFileSize = _item->_size;
} else {
_previousFileSize = trans->stat_size;
}
continue;
}
const QString errMsg = tr("Local file changed during sync, syncing once it arrives completely");
done( SyncFileItem::SoftError, errMsg );
} else if( state == HBF_USER_ABORTED ) {
const QString errMsg = tr("Sync was aborted by user.");
done( SyncFileItem::SoftError, errMsg );
} else {
// Other HBF error conditions.
_item->_httpErrorCode = hbf_fail_http_code(trans.data());
if(checkForProblemsWithShared(_item->_httpErrorCode,
tr("The file was edited locally but is part of a read only share. "
"It is restored and your edit is in the conflict file.")))
return;
done(SyncFileItem::NormalError, hbf_error_string(trans.data(), state));
}
return;
}
ne_set_notifier(_propagator->_session, 0, 0);
if( trans->modtime_accepted ) {
_item->_etag = parseEtag(hbf_transfer_etag( trans.data() ));
} else {
if (!updateMTimeAndETag(uri.data(), trans->oc_header_modtime))
return;
}
_propagator->_journal->setFileRecord(SyncJournalFileRecord(*_item, _propagator->getFilePath(_item->_file)));
// Remove from the progress database:
_propagator->_journal->setUploadInfo(_item->_file, SyncJournalDb::UploadInfo());
_propagator->_journal->commit("upload file start");
if (hbf_validate_source_file(trans.data()) == HBF_SOURCE_FILE_CHANGE) {
/* Has the source file changed since the upload?
* This is different from the previous check because the previous check happens between
* chunks while this one happens when the whole file has been uploaded.
*
* The new etag is already stored in the database in the previous lines so in case of
* crash, we won't have a conflict but we will properly do a new upload
*/
if( attempts++ < 5 ) { /* FIXME: How often do we want to try? */
qDebug("SOURCE file has changed after upload, retry #%d in %d seconds!", attempts, 2*attempts);
Utility::sleep(2*attempts);
continue;
}
// Still the file change error, but we tried a couple of times.
// Ignore this file for now.
// Lets remove the file from the server (at least if it is new) as it is different
// from our file here.
if( _item->_instruction == CSYNC_INSTRUCTION_NEW ) {
QScopedPointer<char, QScopedPointerPodDeleter> uri(
ne_path_escape((_propagator->_remoteDir + _item->_file).toUtf8()));
int rc = ne_delete(_propagator->_session, uri.data());
qDebug() << "Remove the invalid file from server:" << rc;
}
const QString errMsg = tr("Local file changed during sync, syncing once it arrives completely");
done( SyncFileItem::SoftError, errMsg );
return;
}
done(SyncFileItem::Success);
return;
} while( true );
}
void PropagateUploadFileLegacy::chunk_finished_cb(hbf_transfer_s *trans, int chunk, void* userdata)
{
PropagateUploadFileLegacy *that = static_cast<PropagateUploadFileLegacy *>(userdata);
Q_ASSERT(that);
that->_chunked_done += trans->block_arr[chunk]->size;
if (trans->block_cnt > 1) {
SyncJournalDb::UploadInfo pi;
pi._valid = true;
pi._chunk = chunk + 1; // next chunk to start with
pi._transferid = trans->transfer_id;
pi._modtime = Utility::qDateTimeFromTime_t(trans->oc_header_modtime);
that->_propagator->_journal->setUploadInfo(that->_item->_file, pi);
that->_propagator->_journal->commit("Upload info");
}
}
void PropagateUploadFileLegacy::notify_status_cb(void* userdata, ne_session_status status,
const ne_session_status_info* info)
{
PropagateUploadFileLegacy* that = reinterpret_cast<PropagateUploadFileLegacy*>(userdata);
if (status == ne_status_sending && info->sr.total > 0) {
emit that->progress(*that->_item, that->_chunked_done + info->sr.progress);
that->limitBandwidth(that->_chunked_done + info->sr.progress, that->_propagator->_uploadLimit.fetchAndAddAcquire(0));
}
}
static QByteArray parseFileId(ne_request *req) {
QByteArray fileId;
const char *header = ne_get_response_header(req, "OC-FileId");
if( header ) {
fileId = header;
}
return fileId;
}
bool PropagateNeonJob::updateMTimeAndETag(const char* uri, time_t mtime)
{
QByteArray modtime = QByteArray::number(qlonglong(mtime));
ne_propname pname;
pname.nspace = "DAV:";
pname.name = "lastmodified";
ne_proppatch_operation ops[2];
ops[0].name = &pname;
ops[0].type = ne_propset;
ops[0].value = modtime.constData();
ops[1].name = NULL;
int rc = ne_proppatch( _propagator->_session, uri, ops );
Q_UNUSED(rc);
/* FIXME: error handling
* bool error = updateErrorFromSession( rc );
* if( error ) {
* // FIXME: We could not set the mtime. Error or not?
* qDebug() << "PROP-Patching of modified date failed.";
}*/
// get the etag
QScopedPointer<ne_request, ScopedPointerHelpers> req(ne_request_create(_propagator->_session, "HEAD", uri));
int neon_stat = ne_request_dispatch(req.data());
if (updateErrorFromSession(neon_stat, req.data())) {
return false;
} else {
_item->_etag = get_etag_from_reply(req.data());
QByteArray fid = parseFileId(req.data());
if( _item->_fileId.isEmpty() ) {
_item->_fileId = fid;
qDebug() << "FileID was empty, set it to " << _item->_fileId;
} else {
if( !fid.isEmpty() && fid != _item->_fileId ) {
qDebug() << "WARN: FileID seems to have changed: "<< fid << _item->_fileId;
} else {
qDebug() << "FileID is " << _item->_fileId;
}
}
return true;
}
}
void PropagateNeonJob::limitBandwidth(qint64 progress, qint64 bandwidth_limit)
{
if (_propagator->_abortRequested.fetchAndAddRelaxed(0)) {
// Do not limit bandwidth when aborting to speed up the current transfer
return;
}
if (bandwidth_limit > 0) {
int64_t diff = _lastTime.nsecsElapsed() / 1000;
int64_t len = progress - _lastProgress;
if (len > 0 && diff > 0 && (1000000 * len / diff) > bandwidth_limit) {
int64_t wait_time = (1000000 * len / bandwidth_limit) - diff;
if (wait_time > 0) {
//qDebug() << "Limiting bandwidth to " << bandwidth_limit << "KB/s by waiting " << wait_time << " µs; ";
OCC::Utility::usleep(wait_time);
}
}
_lastProgress = progress;
_lastTime.start();
} else if (bandwidth_limit < 0 && bandwidth_limit > -100) {
int64_t diff = _lastTime.nsecsElapsed() / 1000;
if (diff > 0) {
// -bandwidth_limit is the % of bandwidth
int64_t wait_time = -diff * (1 + 100.0 / bandwidth_limit);
if (wait_time > 0) {
OCC::Utility::usleep(qMin(wait_time, int64_t(1000000*10)));
}
}
_lastTime.start();
}
}
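/* Worked example, not part of the original sources: for a relative limit the value
 * is negative and -bandwidth_limit is the percentage of bandwidth to use. With
 * bandwidth_limit = -50 the formula above gives
 *   wait_time = -diff * (1 + 100.0 / -50) = diff,
 * i.e. after each progress step the thread sleeps as long as the transfer itself
 * took, roughly halving the effective throughput; with -25 it sleeps 3 * diff. */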
int PropagateDownloadFileLegacy::content_reader(void *userdata, const char *buf, size_t len)
{
PropagateDownloadFileLegacy *that = static_cast<PropagateDownloadFileLegacy *>(userdata);
size_t written = 0;
if (that->_propagator->_abortRequested.fetchAndAddRelaxed(0)) {
ne_set_error(that->_propagator->_session, "%s", tr("Sync was aborted by user.").toUtf8().data());
return NE_ERROR;
}
if(buf) {
written = that->_file->write(buf, len);
if( len != written || that->_file->error() != QFile::NoError) {
qDebug() << "WRN: content_reader wrote wrong num of bytes:" << len << "," << written;
return NE_ERROR;
}
return NE_OK;
}
return NE_ERROR;
}
/*
* This hook is called after the response has arrived from the server, but before
* the response body is parsed. It decides whether the response is compressed and,
* if it is, installs the compression reader accordingly.
* If the response is not compressed, the normal response body reader is installed.
*/
void PropagateDownloadFileLegacy::install_content_reader( ne_request *req, void *userdata, const ne_status *status )
{
PropagateDownloadFileLegacy *that = static_cast<PropagateDownloadFileLegacy *>(userdata);
Q_UNUSED(status);
if( !that ) {
qDebug("Error: install_content_reader called without valid write context!");
return;
}
if( ne_get_status(req)->klass != 2 ) {
qDebug() << "Request class != 2, aborting.";
ne_add_response_body_reader( req, do_not_accept,
do_not_download_content_reader,
(void*) that );
return;
}
QByteArray reason_phrase = ne_get_status(req)->reason_phrase;
if(reason_phrase == QByteArray("Connection established")) {
ne_add_response_body_reader( req, ne_accept_2xx,
content_reader,
(void*) that );
return;
}
QByteArray etag = get_etag_from_reply(req);
if (etag.isEmpty()) {
qDebug() << Q_FUNC_INFO << "No E-Tag reply by server, considering it invalid" << ne_get_response_header(req, "etag");
that->abortTransfer(req, tr("No E-Tag received from server, check Proxy/Gateway"));
return;
} else if (!that->_expectedEtagForResume.isEmpty() && that->_expectedEtagForResume != etag) {
qDebug() << Q_FUNC_INFO << "We received a different E-Tag for resuming!"
<< QString::fromLatin1(that->_expectedEtagForResume.data()) << "vs"
<< QString::fromLatin1(etag.data());
that->abortTransfer(req, tr("We received a different E-Tag for resuming. Retrying next time."));
return;
}
quint64 start = 0;
QByteArray ranges = ne_get_response_header(req, "content-range");
if (!ranges.isEmpty()) {
QRegExp rx("bytes (\\d+)-");
if (rx.indexIn(ranges) >= 0) {
start = rx.cap(1).toULongLong();
}
}
if (start != that->_resumeStart) {
qDebug() << Q_FUNC_INFO << "Wrong content-range: "<< ranges << " while expecting start was" << that->_resumeStart;
if (start == 0) {
// device doesn't support range, just try again from scratch
that->_file->close();
if (!that->_file->open(QIODevice::WriteOnly)) {
that->abortTransfer(req, that->_file->errorString());
return;
}
} else {
that->abortTransfer(req, tr("Server returned wrong content-range"));
return;
}
}
const char *enc = ne_get_response_header( req, "Content-Encoding" );
qDebug("Content encoding ist <%s> with status %d", enc ? enc : "empty",
status ? status->code : -1 );
if( enc == QLatin1String("gzip") ) {
that->_decompress.reset(ne_decompress_reader( req, ne_accept_2xx,
content_reader, /* reader callback */
that )); /* userdata */
} else {
ne_add_response_body_reader( req, ne_accept_2xx,
content_reader,
(void*) that );
}
}
void PropagateDownloadFileLegacy::abortTransfer(ne_request* req, const QString& error)
{
errorString = error;
ne_set_error(_propagator->_session, "%s", errorString.toUtf8().data());
ne_add_response_body_reader( req, do_not_accept,
do_not_download_content_reader,
this);
}
void PropagateDownloadFileLegacy::notify_status_cb(void* userdata, ne_session_status status,
const ne_session_status_info* info)
{
PropagateDownloadFileLegacy* that = reinterpret_cast<PropagateDownloadFileLegacy*>(userdata);
if (status == ne_status_recving && info->sr.total > 0) {
emit that->progress(*that->_item, info->sr.progress );
that->limitBandwidth(info->sr.progress, that->_propagator->_downloadLimit.fetchAndAddAcquire(0));
}
}
extern QString makeConflictFileName(const QString &fn, const QDateTime &dt); // propagatedownload.cpp
void PropagateDownloadFileLegacy::start()
{
if (_propagator->_abortRequested.fetchAndAddRelaxed(0))
return;
// do a case clash check.
if( _propagator->localFileNameClash(_item->_file) ) {
done( SyncFileItem::NormalError, tr("File %1 can not be downloaded because of a local file name clash!")
.arg(QDir::toNativeSeparators(_item->_file)) );
return;
}
emit progress(*_item, 0);
QString tmpFileName;
const SyncJournalDb::DownloadInfo progressInfo = _propagator->_journal->getDownloadInfo(_item->_file);
if (progressInfo._valid) {
// if the etag has changed meanwhile, remove the already downloaded part.
if (progressInfo._etag != _item->_etag) {
QFile::remove(_propagator->getFilePath(progressInfo._tmpfile));
_propagator->_journal->setDownloadInfo(_item->_file, SyncJournalDb::DownloadInfo());
} else {
tmpFileName = progressInfo._tmpfile;
_expectedEtagForResume = progressInfo._etag;
}
}
if (tmpFileName.isEmpty()) {
tmpFileName = _item->_file;
//add a dot at the beginning of the filename to hide the file.
int slashPos = tmpFileName.lastIndexOf('/');
tmpFileName.insert(slashPos+1, '.');
//add the suffix
tmpFileName += ".~" + QString::number(uint(qrand()), 16);
}
QFile tmpFile(_propagator->getFilePath(tmpFileName));
_file = &tmpFile;
if (!tmpFile.open(QIODevice::Append | QIODevice::Unbuffered)) {
done(SyncFileItem::NormalError, tmpFile.errorString());
return;
}
FileSystem::setFileHidden(tmpFile.fileName(), true);
{
SyncJournalDb::DownloadInfo pi;
pi._etag = _item->_etag;
pi._tmpfile = tmpFileName;
pi._valid = true;
_propagator->_journal->setDownloadInfo(_item->_file, pi);
_propagator->_journal->commit("download file start");
}
if (!_item->_directDownloadUrl.isEmpty()) {
qDebug() << Q_FUNC_INFO << "Direct download URL" << _item->_directDownloadUrl << "not supported with legacy propagator, will go via ownCloud server";
}
/* actually do the request */
int retry = 0;
QScopedPointer<char, QScopedPointerPodDeleter> uri(
ne_path_escape((_propagator->_remoteDir + _item->_file).toUtf8()));
do {
QScopedPointer<ne_request, ScopedPointerHelpers> req(ne_request_create(_propagator->_session, "GET", uri.data()));
/* Allow compressed content by setting the header */
ne_add_request_header( req.data(), "Accept-Encoding", "gzip" );
if (tmpFile.size() > 0) {
quint64 done = tmpFile.size();
if (done == _item->_size) {
qDebug() << "File is already complete, no need to download";
break;
}
QByteArray rangeRequest = "bytes=" + QByteArray::number(done) +'-';
ne_add_request_header(req.data(), "Range", rangeRequest.constData());
ne_add_request_header(req.data(), "Accept-Ranges", "bytes");
qDebug() << "Retry with range " << rangeRequest;
_resumeStart = done;
}
/* hook called before the content is parsed to set the correct reader,
* either the compressed- or uncompressed reader.
*/
ne_hook_post_headers( _propagator->_session, install_content_reader, this);
ne_set_notifier(_propagator->_session, notify_status_cb, this);
_lastProgress = 0;
_lastTime.start();
int neon_stat = ne_request_dispatch(req.data());
_decompress.reset(); // Destroy the decompress after the request has been dispatched.
/* delete the hook again, otherwise they get chained as they are with the session */
ne_unhook_post_headers( _propagator->_session, install_content_reader, this );
ne_set_notifier(_propagator->_session, 0, 0);
if (neon_stat == NE_TIMEOUT && (++retry) < 3) {
continue;
}
// This one is set by install_content_reader if e.g. there is no E-Tag
if (!errorString.isEmpty()) {
// don't keep the temporary file as the file downloaded so far is invalid
tmpFile.close();
tmpFile.remove();
_propagator->_journal->setDownloadInfo(_item->_file, SyncJournalDb::DownloadInfo());
done(SyncFileItem::SoftError, errorString);
return;
}
// This one is set by neon
if( updateErrorFromSession(neon_stat, req.data() ) ) {
qDebug("Error GET: Neon: %d", neon_stat);
if (tmpFile.size() == 0) {
// don't keep the temporary file if it is empty.
tmpFile.close();
tmpFile.remove();
_propagator->_journal->setDownloadInfo(_item->_file, SyncJournalDb::DownloadInfo());
}
return;
}
_item->_etag = get_etag_from_reply(req.data());
break;
} while (1);
tmpFile.close();
tmpFile.flush();
QString fn = _propagator->getFilePath(_item->_file);
bool isConflict = _item->_instruction == CSYNC_INSTRUCTION_CONFLICT
&& !FileSystem::fileEquals(fn, tmpFile.fileName()); // compare the files to see if there was an actual conflict.
//In case of conflict, make a backup of the old file
if (isConflict) {
auto conflictDate = FileSystem::fileExists(fn) ? FileSystem::getModTime(fn) : _item->_modtime;
QString conflictFileName = makeConflictFileName(fn, Utility::qDateTimeFromTime_t(conflictDate));
QString renameError;
if (!FileSystem::rename(fn, conflictFileName, &renameError)) {
//If the rename fails, don't replace it.
done(SyncFileItem::NormalError, renameError);
return;
}
}
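// Carry over the permissions of the file that is about to be replaced.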
QFileInfo existingFile(fn);
if(existingFile.exists() && existingFile.permissions() != tmpFile.permissions()) {
tmpFile.setPermissions(existingFile.permissions());
}
FileSystem::setFileHidden(tmpFile.fileName(), false);
QString error;
_propagator->addTouchedFile(fn);
const qint64 expectedFileSize = _item->log._other_size;
const time_t expectedFileMtime = _item->log._other_modtime;
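// Move the downloaded temporary into place; the expected size and mtime let renameReplace detect whether the local file changed while the download was running.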
if (!FileSystem::renameReplace(tmpFile.fileName(), fn,
expectedFileSize, expectedFileMtime,
&error)) {
done(SyncFileItem::NormalError, error);
return;
}
FileSystem::setModTime(fn, _item->_modtime);
_propagator->_journal->setFileRecord(SyncJournalFileRecord(*_item, fn));
_propagator->_journal->setDownloadInfo(_item->_file, SyncJournalDb::DownloadInfo());
_propagator->_journal->commit("download file start2");
done(isConflict ? SyncFileItem::Conflict : SyncFileItem::Success);
}
}


@@ -1,161 +0,0 @@
/*
* Copyright (C) by Olivier Goffart <ogoffart@owncloud.com>
* Copyright (C) by Klaas Freitag <freitag@owncloud.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
#pragma once
#include "propagatorjobs.h"
#include <httpbf.h>
#include <neon/ne_compress.h>
struct hbf_transfer_s;
struct ne_session_s;
typedef struct ne_prop_result_set_s ne_prop_result_set;
namespace OCC {
/* Helper for QScopedPointer<>, to be used as the deleter.
 * QScopedPointer will call the right overload of cleanup() for the pointer it holds.
 */
struct ScopedPointerHelpers {
static inline void cleanup(hbf_transfer_t *pointer) { if (pointer) hbf_free_transfer(pointer); }
static inline void cleanup(ne_request *pointer) { if (pointer) ne_request_destroy(pointer); }
static inline void cleanup(ne_decompress *pointer) { if (pointer) ne_decompress_destroy(pointer); }
// static inline void cleanup(ne_propfind_handler *pointer) { if (pointer) ne_propfind_destroy(pointer); }
};
/**
* @brief Abstract class for neon job. Lives in the neon thread
* @ingroup libsync
*/
class PropagateNeonJob : public PropagateItemJob {
Q_OBJECT
protected:
/* Issue a PROPPATCH and a PROPFIND to update the mtime and fetch the ETag.
 * Returns true on success, and false if the PROPFIND failed and the error
 * has already been reported.
 */
bool updateMTimeAndETag(const char *uri, time_t);
/* Fetch the error code and string from the session.
 * In case of error, calls done() with the error and returns true.
 * If the HTTP status code equals ignoreHTTPError, that error is ignored.
 */
bool updateErrorFromSession(int neon_code = 0, ne_request *req = 0, int ignoreHTTPError = 0);
/*
 * To be called from the progress callback; waits as long as necessary to stay within the bandwidth limit.
 */
void limitBandwidth(qint64 progress, qint64 limit);
QElapsedTimer _lastTime;
qint64 _lastProgress;
int _httpStatusCode;
public:
PropagateNeonJob(OwncloudPropagator* propagator, const SyncFileItemPtr &item)
: PropagateItemJob(propagator, item), _lastProgress(0), _httpStatusCode(0) {
moveToThread(propagator->_neonThread);
}
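// Legacy neon jobs never run in parallel; the propagator waits for each one to finish before scheduling the next.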
JobParallelism parallelism() Q_DECL_OVERRIDE { return WaitForFinished; }
};
/**
* @brief The UpdateMTimeAndETagJob class
* @ingroup libsync
*/
class UpdateMTimeAndETagJob : public PropagateNeonJob {
Q_OBJECT
public:
UpdateMTimeAndETagJob (OwncloudPropagator* propagator, const SyncFileItemPtr& item) : PropagateNeonJob(propagator, item) {}
void start() Q_DECL_OVERRIDE;
};
/**
* @brief The PropagateUploadFileLegacy class
* @ingroup libsync
*/
class PropagateUploadFileLegacy: public PropagateNeonJob {
Q_OBJECT
public:
explicit PropagateUploadFileLegacy(OwncloudPropagator* propagator,const SyncFileItemPtr& item)
: PropagateNeonJob(propagator, item)
, _chunked_done(0), _chunked_total_size(0), _previousFileSize(0) {}
void start() Q_DECL_OVERRIDE;
private:
// Log callback for httpbf
static void _log_callback(const char *func, const char *text, void*)
{
qDebug() << " " << func << text;
}
// abort callback for httpbf
static int _user_want_abort(void *userData)
{
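// fetchAndAddRelaxed(0) is just an atomic read of the propagator's abort flag.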
return static_cast<PropagateUploadFileLegacy *>(userData)->_propagator->_abortRequested.fetchAndAddRelaxed(0);
}
// callback from httpbf when a chunk is finished
static void chunk_finished_cb(hbf_transfer_s *trans, int chunk, void* userdata);
static void notify_status_cb(void* userdata, ne_session_status status,
const ne_session_status_info* info);
qint64 _chunked_done; // amount of bytes already sent with the previous chunks
qint64 _chunked_total_size; // total size of the whole file
qint64 _previousFileSize; // In case the file size has changed during upload, this is the previous one.
};
class PropagateDownloadFileLegacy: public PropagateNeonJob {
Q_OBJECT
public:
explicit PropagateDownloadFileLegacy(OwncloudPropagator* propagator,const SyncFileItemPtr& item)
: PropagateNeonJob(propagator, item), _file(0), _resumeStart(0) {}
void start() Q_DECL_OVERRIDE;
private:
QFile *_file;
QScopedPointer<ne_decompress, ScopedPointerHelpers> _decompress;
QString errorString;
QByteArray _expectedEtagForResume;
quint64 _resumeStart;
static int do_not_accept (void *userdata, ne_request *req, const ne_status *st)
{
Q_UNUSED(userdata); Q_UNUSED(req); Q_UNUSED(st);
return 0; // ignore this response
}
static int do_not_download_content_reader(void *userdata, const char *buf, size_t len)
{
Q_UNUSED(userdata); Q_UNUSED(buf); Q_UNUSED(len);
return NE_ERROR;
}
// neon hooks:
static int content_reader(void *userdata, const char *buf, size_t len);
static void install_content_reader( ne_request *req, void *userdata, const ne_status *status );
static void notify_status_cb(void* userdata, ne_session_status status,
const ne_session_status_info* info);
/** To be called from install_content_reader if we want to abort the transfer */
void abortTransfer(ne_request *req, const QString &error);
};
}


@@ -47,9 +47,6 @@
#include <QElapsedTimer>
#include <qtextcodec.h>
#ifdef USE_NEON
extern "C" int owncloud_commit(CSYNC* ctx);
#endif
extern "C" const char *csync_instruction_str(enum csync_instructions_e instr);
namespace OCC {
@@ -664,11 +661,6 @@ void SyncEngine::startSync()
csync_set_userdata(_csync_ctx, this);
_account->credentials()->syncContextPreStart(_csync_ctx);
// csync_set_auth_callback( _csync_ctx, getauth );
// csync_set_log_level( 11 ); -- don't set the log level here, that is done by folder.cpp or owncloudcmd.cpp
int timeout = OwncloudPropagator::httpTimeout();
csync_set_module_property(_csync_ctx, "timeout", &timeout);
_stopWatch.start();
qDebug() << "#### Discovery start #################################################### >>";
@@ -799,12 +791,6 @@ void SyncEngine::slotDiscoveryJobFinished(int discoveryResult)
}
}
// FIXME: The propagator could create its session in propagator_legacy.cpp;
// there's no reason to keep csync_owncloud.c around.
ne_session_s *session = 0;
// That call to "set" the property is actually a get: it returns the session.
csync_set_module_property(_csync_ctx, "get_dav_session", &session);
// post update phase script: allow to tweak stuff by a custom script in debug mode.
if( !qgetenv("OWNCLOUD_POST_UPDATE_SCRIPT").isEmpty() ) {
#ifndef NDEBUG
@@ -821,7 +807,7 @@ void SyncEngine::slotDiscoveryJobFinished(int discoveryResult)
_journal->commit("post treewalk");
_propagator = QSharedPointer<OwncloudPropagator>(
new OwncloudPropagator (_account, session, _localPath, _remoteUrl, _remotePath, _journal, &_thread));
new OwncloudPropagator (_account, _localPath, _remoteUrl, _remotePath, _journal));
connect(_propagator.data(), SIGNAL(itemCompleted(const SyncFileItem &, const PropagatorJob &)),
this, SLOT(slotItemCompleted(const SyncFileItem &, const PropagatorJob &)));
connect(_propagator.data(), SIGNAL(progress(const SyncFileItem &,quint64)),
@@ -912,11 +898,6 @@ void SyncEngine::finalize()
_thread.quit();
_thread.wait();
#ifdef USE_NEON
// De-init the neon HTTP(S) connections
owncloud_commit(_csync_ctx);
#endif
csync_commit(_csync_ctx);
qDebug() << "CSync run took " << _stopWatch.addLapTime(QLatin1String("Sync Finished"));


@@ -1,5 +1,5 @@
include_directories(${CMAKE_BINARY_DIR}/csync ${CMAKE_BINARY_DIR}/csync/src ${CMAKE_BINARY_DIR}/src)
include_directories(${CMAKE_SOURCE_DIR}/csync/src/ ${CMAKE_SOURCE_DIR}/csync/src/httpbf/src)
include_directories(${CMAKE_SOURCE_DIR}/csync/src/)
include_directories(${CMAKE_SOURCE_DIR}/csync/src/std ${CMAKE_SOURCE_DIR}/src)
include(owncloud_add_test.cmake)