Backed out updated libcurl.

Asher Baker 2013-03-18 00:55:59 +00:00
parent 9b65900df0
commit e84f6e4afd
1511 changed files with 161513 additions and 213368 deletions

View File

@@ -22,27 +22,27 @@ def BuildCURL():
if AMBuild.target['platform'] == 'darwin':
env['LDFLAGS'] = '-mmacosx-version-min=10.5'
else:
env['CPPFLAGS'] += ' -D_GNU_SOURCE'
env['CFLAGS'] += ' -D_GNU_SOURCE'
curl.AddCommand(command.DirectCommand(argv = args, env = env))
curl.AddCommand(command.ShellCommand('cd lib && make'))
else:
args = ''
projpath = os.path.join(AMBuild.sourceFolder, 'extensions', 'curl', 'curl-src', 'lib',
'libcurl.vcproj')
'build_libcurl.vcproj')
try:
subprocess.Popen('vcbuild')
except:
xprojpath = os.path.join(AMBuild.sourceFolder, 'extensions', 'curl', 'curl-src', 'lib',
'libcurl.vcxproj')
'build_libcurl.vcxproj')
if not os.path.isfile(xprojpath):
curl.AddCommand(command.DirectCommand(['vcupgrade', projpath]))
args = ['msbuild', xprojpath, '/p:Configuration=Release']
args = ['msbuild', xprojpath, '/p:Configuration=LIB Release']
if not args:
args = ['vcbuild', projpath, 'Release']
args = ['vcbuild', projpath, 'LIB Release']
curl.AddCommand(command.DirectCommand(args))
#die "Unable to find libcurl.lib!\n" unless (-f "Release\\libcurl.lib");
#die "Unable to find libcurl.lib!\n" unless (-f "LIB-Release\\libcurl.lib");
BuildCURL()
@@ -75,7 +75,7 @@ elif AMBuild.target['platform'] == 'windows':
'curl',
'curl-src',
'lib',
'Release',
'LIB-Release',
'libcurl.lib')
if os.path.isfile(path):
binary.RelinkIfNewer(path)

View File

@@ -1,112 +0,0 @@
# Google Android makefile for curl and libcurl
#
# This file can be used when building curl using the full Android source
# release or the NDK. Most users do not want or need to do this; please
# instead read the Android section in docs/INSTALL for alternate
# methods.
#
# Place the curl source (including this makefile) into external/curl/ in the
# Android source tree. Then build them with 'make curl' or just 'make libcurl'
# from the Android root. Tested with Android versions 1.5, 2.1-2.3
#
# Note: you must first create a curl_config.h file by running configure in the
# Android environment. The only way I've found to do this is tricky. Perform a
# normal Android build with libcurl in the source tree, providing the target
# "showcommands" to make. The build will eventually fail (because curl_config.h
# doesn't exist yet), but the compiler commands used to build curl will be
# shown. Now, from the external/curl/ directory, run curl's normal configure
# command with flags that match what Android itself uses. This will mean
# putting the compiler directory into the PATH, putting the -I, -isystem and
# -D options into CPPFLAGS, putting the -W, -m, -f, -O and -nostdlib options
# into CFLAGS, and putting the -Wl, -L and -l options into LIBS, along with the
# path to the files libgcc.a, crtbegin_dynamic.o, and crtend_android.o.
# Remember that the paths must be absolute since you will not be running
# configure from the same directory as the Android make. The normal
# cross-compiler options must also be set. Note that the -c, -o, -MD and
# similar flags must not be set.
#
# To see all the LIBS options, you'll need to do the "showcommands" trick on an
# executable that's already buildable and watch what flags Android uses to link
# it (dhcpcd is a good choice to watch). You'll also want to add -L options to
# LIBS that point to the out/.../obj/lib/ and out/.../obj/system/lib/
# directories so that additional libraries can be found and used by curl.
#
# The end result will be a configure command that looks something like this
# (the environment variable A is set to the Android root path which makes the
# command shorter):
#
# A=`realpath ../..` && \
# PATH="$A/prebuilt/linux-x86/toolchain/arm-eabi-X/bin:$PATH" \
# ./configure --host=arm-linux CC=arm-eabi-gcc \
# CPPFLAGS="-I $A/system/core/include ..." \
# CFLAGS="-nostdlib -fno-exceptions -Wno-multichar ..." \
# LIBS="$A/prebuilt/linux-x86/toolchain/arm-eabi-X/lib/gcc/arm-eabi/X\
# /interwork/libgcc.a ..."
#
# Finally, copy the file COPYING to NOTICE so that the curl license gets put
# into the right place (but see the note about this below).
#
# Dan Fandrich
# November 2011
LOCAL_PATH:= $(call my-dir)
common_CFLAGS := -Wpointer-arith -Wwrite-strings -Wunused -Winline -Wnested-externs -Wmissing-declarations -Wmissing-prototypes -Wno-long-long -Wfloat-equal -Wno-multichar -Wsign-compare -Wno-format-nonliteral -Wendif-labels -Wstrict-prototypes -Wdeclaration-after-statement -Wno-system-headers -DHAVE_CONFIG_H
#########################
# Build the libcurl library
include $(CLEAR_VARS)
include $(LOCAL_PATH)/lib/Makefile.inc
CURL_HEADERS := \
curlbuild.h \
curl.h \
curlrules.h \
curlver.h \
easy.h \
mprintf.h \
multi.h \
stdcheaders.h \
typecheck-gcc.h
LOCAL_SRC_FILES := $(addprefix lib/,$(CSOURCES))
LOCAL_C_INCLUDES += $(LOCAL_PATH)/include/
LOCAL_CFLAGS += $(common_CFLAGS)
LOCAL_COPY_HEADERS_TO := libcurl/curl
LOCAL_COPY_HEADERS := $(addprefix include/curl/,$(CURL_HEADERS))
LOCAL_MODULE:= libcurl
LOCAL_MODULE_TAGS := optional
# Copy the licence to a place where Android will find it.
# Actually, this doesn't quite work because the build system searches
# for NOTICE files before it gets to this point, so it will only be seen
# on subsequent builds.
ALL_PREBUILT += $(LOCAL_PATH)/NOTICE
$(LOCAL_PATH)/NOTICE: $(LOCAL_PATH)/COPYING | $(ACP)
$(copy-file-to-target)
include $(BUILD_STATIC_LIBRARY)
#########################
# Build the curl binary
include $(CLEAR_VARS)
include $(LOCAL_PATH)/src/Makefile.inc
LOCAL_SRC_FILES := $(addprefix src/,$(CURL_CFILES))
LOCAL_MODULE := curl
LOCAL_MODULE_TAGS := optional
LOCAL_STATIC_LIBRARIES := libcurl
LOCAL_SYSTEM_SHARED_LIBRARIES := libc
LOCAL_C_INCLUDES += $(LOCAL_PATH)/include $(LOCAL_PATH)/lib
# This may also need to include $(CURLX_ONES) in order to correctly link
# if libcurl is changed to be built as a dynamic library
LOCAL_CFLAGS += $(common_CFLAGS)
include $(BUILD_EXECUTABLE)

File diff suppressed because it is too large

View File

@@ -1,2 +0,0 @@
@CMAKE_CONFIGURABLE_FILE_CONTENT@

View File

@@ -1,75 +0,0 @@
# - Check if the source code provided in the SOURCE argument compiles.
# CURL_CHECK_C_SOURCE_COMPILES(SOURCE VAR)
# - macro which checks if the source code compiles
#  SOURCE - source code to try to compile
#  VAR - variable to store whether the source code compiled
#
# The following variables may be set before calling this macro to
# modify the way the check is run:
#
# CMAKE_REQUIRED_FLAGS = string of compile command line flags
# CMAKE_REQUIRED_DEFINITIONS = list of macros to define (-DFOO=bar)
# CMAKE_REQUIRED_INCLUDES = list of include directories
# CMAKE_REQUIRED_LIBRARIES = list of libraries to link
macro(CURL_CHECK_C_SOURCE_COMPILES SOURCE VAR)
if("${VAR}" MATCHES "^${VAR}$" OR "${VAR}" MATCHES "UNKNOWN")
set(message "${VAR}")
# If the number of arguments is greater than 2 (SOURCE VAR)
if(${ARGC} GREATER 2)
# then add the third argument as a message
set(message "${ARGV2} (${VAR})")
endif(${ARGC} GREATER 2)
set(MACRO_CHECK_FUNCTION_DEFINITIONS
"-D${VAR} ${CMAKE_REQUIRED_FLAGS}")
if(CMAKE_REQUIRED_LIBRARIES)
set(CURL_CHECK_C_SOURCE_COMPILES_ADD_LIBRARIES
"-DLINK_LIBRARIES:STRING=${CMAKE_REQUIRED_LIBRARIES}")
else(CMAKE_REQUIRED_LIBRARIES)
set(CURL_CHECK_C_SOURCE_COMPILES_ADD_LIBRARIES)
endif(CMAKE_REQUIRED_LIBRARIES)
if(CMAKE_REQUIRED_INCLUDES)
set(CURL_CHECK_C_SOURCE_COMPILES_ADD_INCLUDES
"-DINCLUDE_DIRECTORIES:STRING=${CMAKE_REQUIRED_INCLUDES}")
else(CMAKE_REQUIRED_INCLUDES)
set(CURL_CHECK_C_SOURCE_COMPILES_ADD_INCLUDES)
endif(CMAKE_REQUIRED_INCLUDES)
set(src "")
foreach(def ${EXTRA_DEFINES})
set(src "${src}#define ${def} 1\n")
endforeach(def)
foreach(inc ${HEADER_INCLUDES})
set(src "${src}#include <${inc}>\n")
endforeach(inc)
set(src "${src}\nint main() { ${SOURCE} ; return 0; }")
set(CMAKE_CONFIGURABLE_FILE_CONTENT "${src}")
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/CMake/CMakeConfigurableFile.in
"${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/src.c"
IMMEDIATE)
message(STATUS "Performing Test ${message}")
try_compile(${VAR}
${CMAKE_BINARY_DIR}
${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/src.c
COMPILE_DEFINITIONS ${CMAKE_REQUIRED_DEFINITIONS}
CMAKE_FLAGS -DCOMPILE_DEFINITIONS:STRING=${MACRO_CHECK_FUNCTION_DEFINITIONS}
"${CURL_CHECK_C_SOURCE_COMPILES_ADD_LIBRARIES}"
"${CURL_CHECK_C_SOURCE_COMPILES_ADD_INCLUDES}"
OUTPUT_VARIABLE OUTPUT)
if(${VAR})
set(${VAR} 1 CACHE INTERNAL "Test ${message}")
message(STATUS "Performing Test ${message} - Success")
file(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeOutput.log
"Performing C SOURCE FILE Test ${message} succeded with the following output:\n"
"${OUTPUT}\n"
"Source file was:\n${src}\n")
else(${VAR})
message(STATUS "Performing Test ${message} - Failed")
set(${VAR} "" CACHE INTERNAL "Test ${message}")
file(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log
"Performing C SOURCE FILE Test ${message} failed with the following output:\n"
"${OUTPUT}\n"
"Source file was:\n${src}\n")
endif(${VAR})
endif("${VAR}" MATCHES "^${VAR}$" OR "${VAR}" MATCHES "UNKNOWN")
endmacro(CURL_CHECK_C_SOURCE_COMPILES)
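
For reference, a minimal usage sketch of the macro above. The check reads the caller-set EXTRA_DEFINES (bare macro names turned into "#define name 1") and HEADER_INCLUDES (headers wrapped in #include <...>) before wrapping SOURCE in its own main(); the result variable and test body here are illustrative, not taken from curl's build files.

    set(EXTRA_DEFINES)
    set(HEADER_INCLUDES "sys/types.h" "fcntl.h" "unistd.h")
    curl_check_c_source_compiles(
      "int flags = fcntl(0, F_GETFL, 0); (void)flags"
      HAVE_FCNTL_GETFL                  # hypothetical result variable, cached on success
      "fcntl(F_GETFL) usability")       # optional third argument used in the status message
    if(HAVE_FCNTL_GETFL)
      message(STATUS "fcntl(F_GETFL) compiles on this platform")
    endif()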

View File

@@ -1,83 +0,0 @@
# - Check if the source code provided in the SOURCE argument compiles and runs.
# CURL_CHECK_C_SOURCE_RUNS(SOURCE VAR)
# - macro which checks if the source code runs
#  SOURCE - source code to try to compile
#  VAR - variable to store whether the source code compiled and ran
#
# The following variables may be set before calling this macro to
# modify the way the check is run:
#
# CMAKE_REQUIRED_FLAGS = string of compile command line flags
# CMAKE_REQUIRED_DEFINITIONS = list of macros to define (-DFOO=bar)
# CMAKE_REQUIRED_INCLUDES = list of include directories
# CMAKE_REQUIRED_LIBRARIES = list of libraries to link
macro(CURL_CHECK_C_SOURCE_RUNS SOURCE VAR)
if("${VAR}" MATCHES "^${VAR}$" OR "${VAR}" MATCHES "UNKNOWN")
set(message "${VAR}")
# If the number of arguments is greater than 2 (SOURCE VAR)
if(${ARGC} GREATER 2)
# then add the third argument as a message
set(message "${ARGV2} (${VAR})")
endif(${ARGC} GREATER 2)
set(MACRO_CHECK_FUNCTION_DEFINITIONS
"-D${VAR} ${CMAKE_REQUIRED_FLAGS}")
if(CMAKE_REQUIRED_LIBRARIES)
set(CURL_CHECK_C_SOURCE_COMPILES_ADD_LIBRARIES
"-DLINK_LIBRARIES:STRING=${CMAKE_REQUIRED_LIBRARIES}")
else(CMAKE_REQUIRED_LIBRARIES)
set(CURL_CHECK_C_SOURCE_COMPILES_ADD_LIBRARIES)
endif(CMAKE_REQUIRED_LIBRARIES)
if(CMAKE_REQUIRED_INCLUDES)
set(CURL_CHECK_C_SOURCE_COMPILES_ADD_INCLUDES
"-DINCLUDE_DIRECTORIES:STRING=${CMAKE_REQUIRED_INCLUDES}")
else(CMAKE_REQUIRED_INCLUDES)
set(CURL_CHECK_C_SOURCE_COMPILES_ADD_INCLUDES)
endif(CMAKE_REQUIRED_INCLUDES)
set(src "")
foreach(def ${EXTRA_DEFINES})
set(src "${src}#define ${def} 1\n")
endforeach(def)
foreach(inc ${HEADER_INCLUDES})
set(src "${src}#include <${inc}>\n")
endforeach(inc)
set(src "${src}\nint main() { ${SOURCE} ; return 0; }")
set(CMAKE_CONFIGURABLE_FILE_CONTENT "${src}")
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/CMake/CMakeConfigurableFile.in
"${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/src.c"
IMMEDIATE)
message(STATUS "Performing Test ${message}")
try_run(${VAR} ${VAR}_COMPILED
${CMAKE_BINARY_DIR}
${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/src.c
COMPILE_DEFINITIONS ${CMAKE_REQUIRED_DEFINITIONS}
CMAKE_FLAGS -DCOMPILE_DEFINITIONS:STRING=${MACRO_CHECK_FUNCTION_DEFINITIONS}
"${CURL_CHECK_C_SOURCE_COMPILES_ADD_LIBRARIES}"
"${CURL_CHECK_C_SOURCE_COMPILES_ADD_INCLUDES}"
OUTPUT_VARIABLE OUTPUT)
# if it did not compile, make the return value a failure code of 1
if(NOT ${VAR}_COMPILED)
set(${VAR} 1)
endif(NOT ${VAR}_COMPILED)
# if the return value was 0 then it worked
set(result_var ${${VAR}})
if("${result_var}" EQUAL 0)
set(${VAR} 1 CACHE INTERNAL "Test ${message}")
message(STATUS "Performing Test ${message} - Success")
file(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeOutput.log
"Performing C SOURCE FILE Test ${message} succeded with the following output:\n"
"${OUTPUT}\n"
"Return value: ${${VAR}}\n"
"Source file was:\n${src}\n")
else("${result_var}" EQUAL 0)
message(STATUS "Performing Test ${message} - Failed")
set(${VAR} "" CACHE INTERNAL "Test ${message}")
file(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log
"Performing C SOURCE FILE Test ${message} failed with the following output:\n"
"${OUTPUT}\n"
"Return value: ${result_var}\n"
"Source file was:\n${src}\n")
endif("${result_var}" EQUAL 0)
endif("${VAR}" MATCHES "^${VAR}$" OR "${VAR}" MATCHES "UNKNOWN")
endmacro(CURL_CHECK_C_SOURCE_RUNS)
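
The runs variant is driven the same way, with the result additionally requiring the test program to exit with status 0. One of the CMake files later in this commit (presumably OtherTests.cmake; file names are not shown in this view) uses it exactly like this to confirm that poll() works at run time:

    set(EXTRA_DEFINES)
    set(HEADER_INCLUDES)
    if(HAVE_SYS_POLL_H)
      set(HEADER_INCLUDES "sys/poll.h")
    endif(HAVE_SYS_POLL_H)
    curl_check_c_source_runs("return poll((void *)0, 0, 10 /*ms*/)" HAVE_POLL_FINE)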

View File

@@ -1,711 +0,0 @@
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2011, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
#ifdef TIME_WITH_SYS_TIME
/* Time with sys/time test */
#include <sys/types.h>
#include <sys/time.h>
#include <time.h>
int
main ()
{
if ((struct tm *) 0)
return 0;
;
return 0;
}
#endif
#ifdef HAVE_FCNTL_O_NONBLOCK
/* headers for FCNTL_O_NONBLOCK test */
#include <sys/types.h>
#include <unistd.h>
#include <fcntl.h>
/* */
#if defined(sun) || defined(__sun__) || \
defined(__SUNPRO_C) || defined(__SUNPRO_CC)
# if defined(__SVR4) || defined(__srv4__)
# define PLATFORM_SOLARIS
# else
# define PLATFORM_SUNOS4
# endif
#endif
#if (defined(_AIX) || defined(__xlC__)) && !defined(_AIX41)
# define PLATFORM_AIX_V3
#endif
/* */
#if defined(PLATFORM_SUNOS4) || defined(PLATFORM_AIX_V3) || defined(__BEOS__)
#error "O_NONBLOCK does not work on this platform"
#endif
int
main ()
{
/* O_NONBLOCK source test */
int flags = 0;
if(0 != fcntl(0, F_SETFL, flags | O_NONBLOCK))
return 1;
return 0;
}
#endif
#ifdef HAVE_GETHOSTBYADDR_R_5
#include <sys/types.h>
#include <netdb.h>
int
main ()
{
char * address;
int length;
int type;
struct hostent h;
struct hostent_data hdata;
int rc;
#ifndef gethostbyaddr_r
(void)gethostbyaddr_r;
#endif
rc = gethostbyaddr_r(address, length, type, &h, &hdata);
;
return 0;
}
#endif
#ifdef HAVE_GETHOSTBYADDR_R_5_REENTRANT
#define _REENTRANT
#include <sys/types.h>
#include <netdb.h>
int
main ()
{
char * address;
int length;
int type;
struct hostent h;
struct hostent_data hdata;
int rc;
#ifndef gethostbyaddr_r
(void)gethostbyaddr_r;
#endif
rc = gethostbyaddr_r(address, length, type, &h, &hdata);
;
return 0;
}
#endif
#ifdef HAVE_GETHOSTBYADDR_R_7
#include <sys/types.h>
#include <netdb.h>
int
main ()
{
char * address;
int length;
int type;
struct hostent h;
char buffer[8192];
int h_errnop;
struct hostent * hp;
#ifndef gethostbyaddr_r
(void)gethostbyaddr_r;
#endif
hp = gethostbyaddr_r(address, length, type, &h,
buffer, 8192, &h_errnop);
;
return 0;
}
#endif
#ifdef HAVE_GETHOSTBYADDR_R_7_REENTRANT
#define _REENTRANT
#include <sys/types.h>
#include <netdb.h>
int
main ()
{
char * address;
int length;
int type;
struct hostent h;
char buffer[8192];
int h_errnop;
struct hostent * hp;
#ifndef gethostbyaddr_r
(void)gethostbyaddr_r;
#endif
hp = gethostbyaddr_r(address, length, type, &h,
buffer, 8192, &h_errnop);
;
return 0;
}
#endif
#ifdef HAVE_GETHOSTBYADDR_R_8
#include <sys/types.h>
#include <netdb.h>
int
main ()
{
char * address;
int length;
int type;
struct hostent h;
char buffer[8192];
int h_errnop;
struct hostent * hp;
int rc;
#ifndef gethostbyaddr_r
(void)gethostbyaddr_r;
#endif
rc = gethostbyaddr_r(address, length, type, &h,
buffer, 8192, &hp, &h_errnop);
;
return 0;
}
#endif
#ifdef HAVE_GETHOSTBYADDR_R_8_REENTRANT
#define _REENTRANT
#include <sys/types.h>
#include <netdb.h>
int
main ()
{
char * address;
int length;
int type;
struct hostent h;
char buffer[8192];
int h_errnop;
struct hostent * hp;
int rc;
#ifndef gethostbyaddr_r
(void)gethostbyaddr_r;
#endif
rc = gethostbyaddr_r(address, length, type, &h,
buffer, 8192, &hp, &h_errnop);
;
return 0;
}
#endif
#ifdef HAVE_GETHOSTBYNAME_R_3
#include <string.h>
#include <sys/types.h>
#include <netdb.h>
#undef NULL
#define NULL (void *)0
int
main ()
{
struct hostent_data data;
#ifndef gethostbyname_r
(void)gethostbyname_r;
#endif
gethostbyname_r(NULL, NULL, NULL);
;
return 0;
}
#endif
#ifdef HAVE_GETHOSTBYNAME_R_3_REENTRANT
#define _REENTRANT
#include <string.h>
#include <sys/types.h>
#include <netdb.h>
#undef NULL
#define NULL (void *)0
int
main ()
{
struct hostent_data data;
#ifndef gethostbyname_r
(void)gethostbyname_r;
#endif
gethostbyname_r(NULL, NULL, NULL);
;
return 0;
}
#endif
#ifdef HAVE_GETHOSTBYNAME_R_5
#include <sys/types.h>
#include <netinet/in.h>
#include <netdb.h>
#undef NULL
#define NULL (void *)0
int
main ()
{
#ifndef gethostbyname_r
(void)gethostbyname_r;
#endif
gethostbyname_r(NULL, NULL, NULL, 0, NULL);
;
return 0;
}
#endif
#ifdef HAVE_GETHOSTBYNAME_R_5_REENTRANT
#define _REENTRANT
#include <sys/types.h>
#include <netdb.h>
#undef NULL
#define NULL (void *)0
int
main ()
{
#ifndef gethostbyname_r
(void)gethostbyname_r;
#endif
gethostbyname_r(NULL, NULL, NULL, 0, NULL);
;
return 0;
}
#endif
#ifdef HAVE_GETHOSTBYNAME_R_6
#include <sys/types.h>
#include <netdb.h>
#undef NULL
#define NULL (void *)0
int
main ()
{
#ifndef gethostbyname_r
(void)gethostbyname_r;
#endif
gethostbyname_r(NULL, NULL, NULL, 0, NULL, NULL);
;
return 0;
}
#endif
#ifdef HAVE_GETHOSTBYNAME_R_6_REENTRANT
#define _REENTRANT
#include <sys/types.h>
#include <netdb.h>
#undef NULL
#define NULL (void *)0
int
main ()
{
#ifndef gethostbyname_r
(void)gethostbyname_r;
#endif
gethostbyname_r(NULL, NULL, NULL, 0, NULL, NULL);
;
return 0;
}
#endif
#ifdef HAVE_SOCKLEN_T
#ifdef _WIN32
#include <ws2tcpip.h>
#else
#include <sys/types.h>
#include <sys/socket.h>
#endif
int
main ()
{
if ((socklen_t *) 0)
return 0;
if (sizeof (socklen_t))
return 0;
;
return 0;
}
#endif
#ifdef HAVE_IN_ADDR_T
#include <sys/types.h>
#include <sys/socket.h>
#include <arpa/inet.h>
int
main ()
{
if ((in_addr_t *) 0)
return 0;
if (sizeof (in_addr_t))
return 0;
;
return 0;
}
#endif
#ifdef HAVE_BOOL_T
#ifdef HAVE_SYS_TYPES_H
#include <sys/types.h>
#endif
#ifdef HAVE_STDBOOL_H
#include <stdbool.h>
#endif
int
main ()
{
if (sizeof (bool *) )
return 0;
;
return 0;
}
#endif
#ifdef STDC_HEADERS
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <float.h>
int main() { return 0; }
#endif
#ifdef RETSIGTYPE_TEST
#include <sys/types.h>
#include <signal.h>
#ifdef signal
# undef signal
#endif
#ifdef __cplusplus
extern "C" void (*signal (int, void (*)(int)))(int);
#else
void (*signal ()) ();
#endif
int
main ()
{
return 0;
}
#endif
#ifdef HAVE_INET_NTOA_R_DECL
#include <arpa/inet.h>
typedef void (*func_type)();
int main()
{
#ifndef inet_ntoa_r
func_type func;
func = (func_type)inet_ntoa_r;
#endif
return 0;
}
#endif
#ifdef HAVE_INET_NTOA_R_DECL_REENTRANT
#define _REENTRANT
#include <arpa/inet.h>
typedef void (*func_type)();
int main()
{
#ifndef inet_ntoa_r
func_type func;
func = (func_type)&inet_ntoa_r;
#endif
return 0;
}
#endif
#ifdef HAVE_GETADDRINFO
#include <netdb.h>
#include <sys/types.h>
#include <sys/socket.h>
int main(void) {
struct addrinfo hints, *ai;
int error;
memset(&hints, 0, sizeof(hints));
hints.ai_family = AF_UNSPEC;
hints.ai_socktype = SOCK_STREAM;
#ifndef getaddrinfo
(void)getaddrinfo;
#endif
error = getaddrinfo("127.0.0.1", "8080", &hints, &ai);
if (error) {
return 1;
}
return 0;
}
#endif
#ifdef HAVE_FILE_OFFSET_BITS
#ifdef _FILE_OFFSET_BITS
#undef _FILE_OFFSET_BITS
#endif
#define _FILE_OFFSET_BITS 64
#include <sys/types.h>
/* Check that off_t can represent 2**63 - 1 correctly.
We can't simply define LARGE_OFF_T to be 9223372036854775807,
since some C++ compilers masquerading as C compilers
incorrectly reject 9223372036854775807. */
#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62))
int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721
&& LARGE_OFF_T % 2147483647 == 1)
? 1 : -1];
int main () { ; return 0; }
#endif
#ifdef HAVE_IOCTLSOCKET
/* includes start */
#ifdef HAVE_WINDOWS_H
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <windows.h>
# ifdef HAVE_WINSOCK2_H
# include <winsock2.h>
# else
# ifdef HAVE_WINSOCK_H
# include <winsock.h>
# endif
# endif
#endif
int
main ()
{
/* ioctlsocket source code */
int socket;
unsigned long flags = ioctlsocket(socket, FIONBIO, &flags);
;
return 0;
}
#endif
#ifdef HAVE_IOCTLSOCKET_CAMEL
/* includes start */
#ifdef HAVE_WINDOWS_H
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <windows.h>
# ifdef HAVE_WINSOCK2_H
# include <winsock2.h>
# else
# ifdef HAVE_WINSOCK_H
# include <winsock.h>
# endif
# endif
#endif
int
main ()
{
/* IoctlSocket source code */
if(0 != IoctlSocket(0, 0, 0))
return 1;
;
return 0;
}
#endif
#ifdef HAVE_IOCTLSOCKET_CAMEL_FIONBIO
/* includes start */
#ifdef HAVE_WINDOWS_H
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <windows.h>
# ifdef HAVE_WINSOCK2_H
# include <winsock2.h>
# else
# ifdef HAVE_WINSOCK_H
# include <winsock.h>
# endif
# endif
#endif
int
main ()
{
/* IoctlSocket source code */
long flags = 0;
if(0 != ioctlsocket(0, FIONBIO, &flags))
return 1;
;
return 0;
}
#endif
#ifdef HAVE_IOCTLSOCKET_FIONBIO
/* includes start */
#ifdef HAVE_WINDOWS_H
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <windows.h>
# ifdef HAVE_WINSOCK2_H
# include <winsock2.h>
# else
# ifdef HAVE_WINSOCK_H
# include <winsock.h>
# endif
# endif
#endif
int
main ()
{
int flags = 0;
if(0 != ioctlsocket(0, FIONBIO, &flags))
return 1;
;
return 0;
}
#endif
#ifdef HAVE_IOCTL_FIONBIO
/* headers for FIONBIO test */
/* includes start */
#ifdef HAVE_SYS_TYPES_H
# include <sys/types.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_SYS_SOCKET_H
# include <sys/socket.h>
#endif
#ifdef HAVE_SYS_IOCTL_H
# include <sys/ioctl.h>
#endif
#ifdef HAVE_STROPTS_H
# include <stropts.h>
#endif
int
main ()
{
int flags = 0;
if(0 != ioctl(0, FIONBIO, &flags))
return 1;
;
return 0;
}
#endif
#ifdef HAVE_IOCTL_SIOCGIFADDR
/* headers for FIONBIO test */
/* includes start */
#ifdef HAVE_SYS_TYPES_H
# include <sys/types.h>
#endif
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif
#ifdef HAVE_SYS_SOCKET_H
# include <sys/socket.h>
#endif
#ifdef HAVE_SYS_IOCTL_H
# include <sys/ioctl.h>
#endif
#ifdef HAVE_STROPTS_H
# include <stropts.h>
#endif
#include <net/if.h>
int
main ()
{
struct ifreq ifr;
if(0 != ioctl(0, SIOCGIFADDR, &ifr))
return 1;
;
return 0;
}
#endif
#ifdef HAVE_SETSOCKOPT_SO_NONBLOCK
/* includes start */
#ifdef HAVE_WINDOWS_H
# ifndef WIN32_LEAN_AND_MEAN
# define WIN32_LEAN_AND_MEAN
# endif
# include <windows.h>
# ifdef HAVE_WINSOCK2_H
# include <winsock2.h>
# else
# ifdef HAVE_WINSOCK_H
# include <winsock.h>
# endif
# endif
#endif
/* includes start */
#ifdef HAVE_SYS_TYPES_H
# include <sys/types.h>
#endif
#ifdef HAVE_SYS_SOCKET_H
# include <sys/socket.h>
#endif
/* includes end */
int
main ()
{
if(0 != setsockopt(0, SOL_SOCKET, SO_NONBLOCK, 0, 0))
return 1;
;
return 0;
}
#endif
#ifdef HAVE_GLIBC_STRERROR_R
#include <string.h>
#include <errno.h>
int
main () {
char buffer[1024]; /* big enough to play with */
char *string =
strerror_r(EACCES, buffer, sizeof(buffer));
/* this should've returned a string */
if(!string || !string[0])
return 99;
return 0;
}
#endif
#ifdef HAVE_POSIX_STRERROR_R
#include <string.h>
#include <errno.h>
int
main () {
char buffer[1024]; /* big enough to play with */
int error =
strerror_r(EACCES, buffer, sizeof(buffer));
/* This should've returned zero, and written an error string in the
buffer.*/
if(!buffer[0] || error)
return 99;
return 0;
}
#endif
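
Each #ifdef block above is a self-contained test program: the build compiles this file with exactly one guard symbol defined, so only the matching main() is active and the compile (or run) result decides the corresponding feature define. A hedged sketch of driving one pure compile-only block follows; the file path and output variable are assumptions for illustration, not taken from this diff.

    # Does including both <sys/time.h> and <time.h> work? Compile the block
    # guarded by TIME_WITH_SYS_TIME in the test source above.
    try_compile(TIME_WITH_SYS_TIME
      ${CMAKE_BINARY_DIR}
      ${CMAKE_CURRENT_SOURCE_DIR}/CMake/CurlTests.c   # assumed location of the file above
      COMPILE_DEFINITIONS -DTIME_WITH_SYS_TIME
      OUTPUT_VARIABLE TRY_COMPILE_OUTPUT)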

View File

@@ -1,21 +0,0 @@
# Extension of the standard FindOpenSSL.cmake
# Adds OPENSSL_INCLUDE_DIRS and libeay32
include("${CMAKE_ROOT}/Modules/FindOpenSSL.cmake")
# starting 2.8 it is better to use standard modules
if(CMAKE_MAJOR_VERSION EQUAL "2" AND CMAKE_MINOR_VERSION LESS "8")
# Bill Hoffman told that libeay32 is necessary for him:
find_library(SSL_LIBEAY NAMES libeay32)
if(OPENSSL_FOUND)
if(SSL_LIBEAY)
list(APPEND OPENSSL_LIBRARIES ${SSL_LIBEAY})
else()
set(OPENSSL_FOUND FALSE)
endif()
endif()
endif() # if (CMAKE_MAJOR_VERSION EQUAL "2" AND CMAKE_MINOR_VERSION LESS "8")
if(OPENSSL_FOUND)
set(OPENSSL_INCLUDE_DIRS ${OPENSSL_INCLUDE_DIR})
endif()
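
Because this wrapper sits on CMAKE_MODULE_PATH, a plain find_package(OpenSSL) call, as the main CMakeLists.txt later in this commit issues, picks it up transparently and gains OPENSSL_INCLUDE_DIRS plus, on pre-2.8 CMake, libeay32 appended to OPENSSL_LIBRARIES. A minimal consumer sketch, mirroring the later CMakeLists.txt usage:

    find_package(OpenSSL)                    # dispatches to the wrapper above
    if(OPENSSL_FOUND)
      include_directories(${OPENSSL_INCLUDE_DIRS})
      list(APPEND CURL_LIBS ${OPENSSL_LIBRARIES})
    endif()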

View File

@@ -1,10 +0,0 @@
# Locate zlib
include("${CMAKE_ROOT}/Modules/FindZLIB.cmake")
# starting 2.8 it is better to use standard modules
if(CMAKE_MAJOR_VERSION EQUAL "2" AND CMAKE_MINOR_VERSION LESS "8")
find_library(ZLIB_LIBRARY_DEBUG NAMES zd zlibd zdlld zlib1d )
if(ZLIB_FOUND AND ZLIB_LIBRARY_DEBUG)
set( ZLIB_LIBRARIES optimized "${ZLIB_LIBRARY}" debug ${ZLIB_LIBRARY_DEBUG})
endif()
endif()

View File

@@ -1,250 +0,0 @@
include(CurlCheckCSourceCompiles)
set(EXTRA_DEFINES "__unused1\n#undef inline\n#define __unused2")
set(HEADER_INCLUDES)
set(headers_hack)
macro(add_header_include check header)
if(${check})
set(headers_hack
"${headers_hack}\n#include <${header}>")
#SET(HEADER_INCLUDES
# ${HEADER_INCLUDES}
# "${header}")
endif(${check})
endmacro(add_header_include)
set(signature_call_conv)
if(HAVE_WINDOWS_H)
add_header_include(HAVE_WINDOWS_H "windows.h")
add_header_include(HAVE_WINSOCK2_H "winsock2.h")
add_header_include(HAVE_WINSOCK_H "winsock.h")
set(EXTRA_DEFINES ${EXTRA_DEFINES}
"__unused7\n#ifndef WIN32_LEAN_AND_MEAN\n#define WIN32_LEAN_AND_MEAN\n#endif\n#define __unused3")
set(signature_call_conv "PASCAL")
else(HAVE_WINDOWS_H)
add_header_include(HAVE_SYS_TYPES_H "sys/types.h")
add_header_include(HAVE_SYS_SOCKET_H "sys/socket.h")
endif(HAVE_WINDOWS_H)
set(EXTRA_DEFINES_BACKUP "${EXTRA_DEFINES}")
set(EXTRA_DEFINES "${EXTRA_DEFINES_BACKUP}\n${headers_hack}\n${extern_line}\n#define __unused5")
curl_check_c_source_compiles("recv(0, 0, 0, 0)" curl_cv_recv)
if(curl_cv_recv)
# AC_CACHE_CHECK([types of arguments and return type for recv],
#[curl_cv_func_recv_args], [
#SET(curl_cv_func_recv_args "unknown")
#for recv_retv in 'int' 'ssize_t'; do
if(NOT DEFINED curl_cv_func_recv_args OR "${curl_cv_func_recv_args}" STREQUAL "unknown")
foreach(recv_retv "int" "ssize_t" )
foreach(recv_arg1 "int" "ssize_t" "SOCKET")
foreach(recv_arg2 "void *" "char *")
foreach(recv_arg3 "size_t" "int" "socklen_t" "unsigned int")
foreach(recv_arg4 "int" "unsigned int")
if(NOT curl_cv_func_recv_done)
set(curl_cv_func_recv_test "UNKNOWN")
set(extern_line "extern ${recv_retv} ${signature_call_conv} recv(${recv_arg1}, ${recv_arg2}, ${recv_arg3}, ${recv_arg4})\;")
set(EXTRA_DEFINES "${EXTRA_DEFINES_BACKUP}\n${headers_hack}\n${extern_line}\n#define __unused5")
curl_check_c_source_compiles("
${recv_arg1} s=0;
${recv_arg2} buf=0;
${recv_arg3} len=0;
${recv_arg4} flags=0;
${recv_retv} res = recv(s, buf, len, flags)"
curl_cv_func_recv_test
"${recv_retv} recv(${recv_arg1}, ${recv_arg2}, ${recv_arg3}, ${recv_arg4})")
if(curl_cv_func_recv_test)
set(curl_cv_func_recv_args
"${recv_arg1},${recv_arg2},${recv_arg3},${recv_arg4},${recv_retv}")
set(RECV_TYPE_ARG1 "${recv_arg1}")
set(RECV_TYPE_ARG2 "${recv_arg2}")
set(RECV_TYPE_ARG3 "${recv_arg3}")
set(RECV_TYPE_ARG4 "${recv_arg4}")
set(RECV_TYPE_RETV "${recv_retv}")
set(HAVE_RECV 1)
set(curl_cv_func_recv_done 1)
endif(curl_cv_func_recv_test)
endif(NOT curl_cv_func_recv_done)
endforeach(recv_arg4)
endforeach(recv_arg3)
endforeach(recv_arg2)
endforeach(recv_arg1)
endforeach(recv_retv)
else(NOT DEFINED curl_cv_func_recv_args OR "${curl_cv_func_recv_args}" STREQUAL "unknown")
string(REGEX REPLACE "^([^,]*),[^,]*,[^,]*,[^,]*,[^,]*$" "\\1" RECV_TYPE_ARG1 "${curl_cv_func_recv_args}")
string(REGEX REPLACE "^[^,]*,([^,]*),[^,]*,[^,]*,[^,]*$" "\\1" RECV_TYPE_ARG2 "${curl_cv_func_recv_args}")
string(REGEX REPLACE "^[^,]*,[^,]*,([^,]*),[^,]*,[^,]*$" "\\1" RECV_TYPE_ARG3 "${curl_cv_func_recv_args}")
string(REGEX REPLACE "^[^,]*,[^,]*,[^,]*,([^,]*),[^,]*$" "\\1" RECV_TYPE_ARG4 "${curl_cv_func_recv_args}")
string(REGEX REPLACE "^[^,]*,[^,]*,[^,]*,[^,]*,([^,]*)$" "\\1" RECV_TYPE_RETV "${curl_cv_func_recv_args}")
#MESSAGE("RECV_TYPE_ARG1 ${RECV_TYPE_ARG1}")
#MESSAGE("RECV_TYPE_ARG2 ${RECV_TYPE_ARG2}")
#MESSAGE("RECV_TYPE_ARG3 ${RECV_TYPE_ARG3}")
#MESSAGE("RECV_TYPE_ARG4 ${RECV_TYPE_ARG4}")
#MESSAGE("RECV_TYPE_RETV ${RECV_TYPE_RETV}")
endif(NOT DEFINED curl_cv_func_recv_args OR "${curl_cv_func_recv_args}" STREQUAL "unknown")
if("${curl_cv_func_recv_args}" STREQUAL "unknown")
message(FATAL_ERROR "Cannot find proper types to use for recv args")
endif("${curl_cv_func_recv_args}" STREQUAL "unknown")
else(curl_cv_recv)
message(FATAL_ERROR "Unable to link function recv")
endif(curl_cv_recv)
set(curl_cv_func_recv_args "${curl_cv_func_recv_args}" CACHE INTERNAL "Arguments for recv")
set(HAVE_RECV 1)
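
For orientation: on a typical glibc system, where recv() is declared as ssize_t recv(int, void *, size_t, int), the first signature the loops above accept yields a cached string and derived type variables roughly like the following (illustrative values, not taken from this diff):

    # curl_cv_func_recv_args would be "int,void *,size_t,int,ssize_t"
    set(RECV_TYPE_ARG1 "int")
    set(RECV_TYPE_ARG2 "void *")
    set(RECV_TYPE_ARG3 "size_t")
    set(RECV_TYPE_ARG4 "int")
    set(RECV_TYPE_RETV "ssize_t")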
curl_check_c_source_compiles("send(0, 0, 0, 0)" curl_cv_send)
if(curl_cv_send)
# AC_CACHE_CHECK([types of arguments and return type for send],
#[curl_cv_func_send_args], [
#SET(curl_cv_func_send_args "unknown")
#for send_retv in 'int' 'ssize_t'; do
if(NOT DEFINED curl_cv_func_send_args OR "${curl_cv_func_send_args}" STREQUAL "unknown")
foreach(send_retv "int" "ssize_t" )
foreach(send_arg1 "int" "ssize_t" "SOCKET")
foreach(send_arg2 "const void *" "void *" "char *" "const char *")
foreach(send_arg3 "size_t" "int" "socklen_t" "unsigned int")
foreach(send_arg4 "int" "unsigned int")
if(NOT curl_cv_func_send_done)
set(curl_cv_func_send_test "UNKNOWN")
set(extern_line "extern ${send_retv} ${signature_call_conv} send(${send_arg1}, ${send_arg2}, ${send_arg3}, ${send_arg4})\;")
set(EXTRA_DEFINES "${EXTRA_DEFINES_BACKUP}\n${headers_hack}\n${extern_line}\n#define __unused5")
curl_check_c_source_compiles("
${send_arg1} s=0;
${send_arg2} buf=0;
${send_arg3} len=0;
${send_arg4} flags=0;
${send_retv} res = send(s, buf, len, flags)"
curl_cv_func_send_test
"${send_retv} send(${send_arg1}, ${send_arg2}, ${send_arg3}, ${send_arg4})")
if(curl_cv_func_send_test)
#MESSAGE("Found arguments: ${curl_cv_func_send_test}")
string(REGEX REPLACE "(const) .*" "\\1" send_qual_arg2 "${send_arg2}")
string(REGEX REPLACE "const (.*)" "\\1" send_arg2 "${send_arg2}")
set(curl_cv_func_send_args
"${send_arg1},${send_arg2},${send_arg3},${send_arg4},${send_retv},${send_qual_arg2}")
set(SEND_TYPE_ARG1 "${send_arg1}")
set(SEND_TYPE_ARG2 "${send_arg2}")
set(SEND_TYPE_ARG3 "${send_arg3}")
set(SEND_TYPE_ARG4 "${send_arg4}")
set(SEND_TYPE_RETV "${send_retv}")
set(HAVE_SEND 1)
set(curl_cv_func_send_done 1)
endif(curl_cv_func_send_test)
endif(NOT curl_cv_func_send_done)
endforeach(send_arg4)
endforeach(send_arg3)
endforeach(send_arg2)
endforeach(send_arg1)
endforeach(send_retv)
else(NOT DEFINED curl_cv_func_send_args OR "${curl_cv_func_send_args}" STREQUAL "unknown")
string(REGEX REPLACE "^([^,]*),[^,]*,[^,]*,[^,]*,[^,]*,[^,]*$" "\\1" SEND_TYPE_ARG1 "${curl_cv_func_send_args}")
string(REGEX REPLACE "^[^,]*,([^,]*),[^,]*,[^,]*,[^,]*,[^,]*$" "\\1" SEND_TYPE_ARG2 "${curl_cv_func_send_args}")
string(REGEX REPLACE "^[^,]*,[^,]*,([^,]*),[^,]*,[^,]*,[^,]*$" "\\1" SEND_TYPE_ARG3 "${curl_cv_func_send_args}")
string(REGEX REPLACE "^[^,]*,[^,]*,[^,]*,([^,]*),[^,]*,[^,]*$" "\\1" SEND_TYPE_ARG4 "${curl_cv_func_send_args}")
string(REGEX REPLACE "^[^,]*,[^,]*,[^,]*,[^,]*,([^,]*),[^,]*$" "\\1" SEND_TYPE_RETV "${curl_cv_func_send_args}")
string(REGEX REPLACE "^[^,]*,[^,]*,[^,]*,[^,]*,[^,]*,([^,]*)$" "\\1" SEND_QUAL_ARG2 "${curl_cv_func_send_args}")
#MESSAGE("SEND_TYPE_ARG1 ${SEND_TYPE_ARG1}")
#MESSAGE("SEND_TYPE_ARG2 ${SEND_TYPE_ARG2}")
#MESSAGE("SEND_TYPE_ARG3 ${SEND_TYPE_ARG3}")
#MESSAGE("SEND_TYPE_ARG4 ${SEND_TYPE_ARG4}")
#MESSAGE("SEND_TYPE_RETV ${SEND_TYPE_RETV}")
#MESSAGE("SEND_QUAL_ARG2 ${SEND_QUAL_ARG2}")
endif(NOT DEFINED curl_cv_func_send_args OR "${curl_cv_func_send_args}" STREQUAL "unknown")
if("${curl_cv_func_send_args}" STREQUAL "unknown")
message(FATAL_ERROR "Cannot find proper types to use for send args")
endif("${curl_cv_func_send_args}" STREQUAL "unknown")
set(SEND_QUAL_ARG2 "const")
else(curl_cv_send)
message(FATAL_ERROR "Unable to link function send")
endif(curl_cv_send)
set(curl_cv_func_send_args "${curl_cv_func_send_args}" CACHE INTERNAL "Arguments for send")
set(HAVE_SEND 1)
set(EXTRA_DEFINES "${EXTRA_DEFINES}\n${headers_hack}\n#define __unused5")
curl_check_c_source_compiles("int flag = MSG_NOSIGNAL" HAVE_MSG_NOSIGNAL)
set(EXTRA_DEFINES "__unused1\n#undef inline\n#define __unused2")
set(HEADER_INCLUDES)
set(headers_hack)
macro(add_header_include check header)
if(${check})
set(headers_hack
"${headers_hack}\n#include <${header}>")
#SET(HEADER_INCLUDES
# ${HEADER_INCLUDES}
# "${header}")
endif(${check})
endmacro(add_header_include header)
if(HAVE_WINDOWS_H)
set(EXTRA_DEFINES ${EXTRA_DEFINES}
"__unused7\n#ifndef WIN32_LEAN_AND_MEAN\n#define WIN32_LEAN_AND_MEAN\n#endif\n#define __unused3")
add_header_include(HAVE_WINDOWS_H "windows.h")
add_header_include(HAVE_WINSOCK2_H "winsock2.h")
add_header_include(HAVE_WINSOCK_H "winsock.h")
else(HAVE_WINDOWS_H)
add_header_include(HAVE_SYS_TYPES_H "sys/types.h")
add_header_include(HAVE_SYS_TIME_H "sys/time.h")
add_header_include(TIME_WITH_SYS_TIME "time.h")
add_header_include(HAVE_TIME_H "time.h")
endif(HAVE_WINDOWS_H)
set(EXTRA_DEFINES "${EXTRA_DEFINES}\n${headers_hack}\n#define __unused5")
curl_check_c_source_compiles("struct timeval ts;\nts.tv_sec = 0;\nts.tv_usec = 0" HAVE_STRUCT_TIMEVAL)
include(CurlCheckCSourceRuns)
set(EXTRA_DEFINES)
set(HEADER_INCLUDES)
if(HAVE_SYS_POLL_H)
set(HEADER_INCLUDES "sys/poll.h")
endif(HAVE_SYS_POLL_H)
curl_check_c_source_runs("return poll((void *)0, 0, 10 /*ms*/)" HAVE_POLL_FINE)
set(HAVE_SIG_ATOMIC_T 1)
set(EXTRA_DEFINES)
set(HEADER_INCLUDES)
if(HAVE_SIGNAL_H)
set(HEADER_INCLUDES "signal.h")
set(CMAKE_EXTRA_INCLUDE_FILES "signal.h")
endif(HAVE_SIGNAL_H)
check_type_size("sig_atomic_t" SIZEOF_SIG_ATOMIC_T)
if(HAVE_SIZEOF_SIG_ATOMIC_T)
curl_check_c_source_compiles("static volatile sig_atomic_t dummy = 0" HAVE_SIG_ATOMIC_T_NOT_VOLATILE)
if(NOT HAVE_SIG_ATOMIC_T_NOT_VOLATILE)
set(HAVE_SIG_ATOMIC_T_VOLATILE 1)
endif(NOT HAVE_SIG_ATOMIC_T_NOT_VOLATILE)
endif(HAVE_SIZEOF_SIG_ATOMIC_T)
set(CHECK_TYPE_SIZE_PREINCLUDE
"#undef inline")
if(HAVE_WINDOWS_H)
set(CHECK_TYPE_SIZE_PREINCLUDE "${CHECK_TYPE_SIZE_PREINCLUDE}
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>")
if(HAVE_WINSOCK2_H)
set(CHECK_TYPE_SIZE_PREINCLUDE "${CHECK_TYPE_SIZE_PREINCLUDE}\n#include <winsock2.h>")
endif(HAVE_WINSOCK2_H)
else(HAVE_WINDOWS_H)
if(HAVE_SYS_SOCKET_H)
set(CMAKE_EXTRA_INCLUDE_FILES ${CMAKE_EXTRA_INCLUDE_FILES}
"sys/socket.h")
endif(HAVE_SYS_SOCKET_H)
if(HAVE_NETINET_IN_H)
set(CMAKE_EXTRA_INCLUDE_FILES ${CMAKE_EXTRA_INCLUDE_FILES}
"netinet/in.h")
endif(HAVE_NETINET_IN_H)
if(HAVE_ARPA_INET_H)
set(CMAKE_EXTRA_INCLUDE_FILES ${CMAKE_EXTRA_INCLUDE_FILES}
"arpa/inet.h")
endif(HAVE_ARPA_INET_H)
endif(HAVE_WINDOWS_H)
check_type_size("struct sockaddr_storage" SIZEOF_STRUCT_SOCKADDR_STORAGE)
if(HAVE_SIZEOF_STRUCT_SOCKADDR_STORAGE)
set(HAVE_STRUCT_SOCKADDR_STORAGE 1)
endif(HAVE_SIZEOF_STRUCT_SOCKADDR_STORAGE)

View File

@@ -1,121 +0,0 @@
if(NOT UNIX)
if(WIN32)
set(HAVE_LIBDL 0)
set(HAVE_LIBUCB 0)
set(HAVE_LIBSOCKET 0)
set(NOT_NEED_LIBNSL 0)
set(HAVE_LIBNSL 0)
set(HAVE_LIBZ 0)
set(HAVE_LIBCRYPTO 0)
set(HAVE_DLOPEN 0)
set(HAVE_ALLOCA_H 0)
set(HAVE_ARPA_INET_H 0)
set(HAVE_DLFCN_H 0)
set(HAVE_FCNTL_H 1)
set(HAVE_FEATURES_H 0)
set(HAVE_INTTYPES_H 0)
set(HAVE_IO_H 1)
set(HAVE_MALLOC_H 1)
set(HAVE_MEMORY_H 1)
set(HAVE_NETDB_H 0)
set(HAVE_NETINET_IF_ETHER_H 0)
set(HAVE_NETINET_IN_H 0)
set(HAVE_NET_IF_H 0)
set(HAVE_PROCESS_H 1)
set(HAVE_PWD_H 0)
set(HAVE_SETJMP_H 1)
set(HAVE_SGTTY_H 0)
set(HAVE_SIGNAL_H 1)
set(HAVE_SOCKIO_H 0)
set(HAVE_STDINT_H 0)
set(HAVE_STDLIB_H 1)
set(HAVE_STRINGS_H 0)
set(HAVE_STRING_H 1)
set(HAVE_SYS_PARAM_H 0)
set(HAVE_SYS_POLL_H 0)
set(HAVE_SYS_SELECT_H 0)
set(HAVE_SYS_SOCKET_H 0)
set(HAVE_SYS_SOCKIO_H 0)
set(HAVE_SYS_STAT_H 1)
set(HAVE_SYS_TIME_H 0)
set(HAVE_SYS_TYPES_H 1)
set(HAVE_SYS_UTIME_H 1)
set(HAVE_TERMIOS_H 0)
set(HAVE_TERMIO_H 0)
set(HAVE_TIME_H 1)
set(HAVE_UNISTD_H 0)
set(HAVE_UTIME_H 0)
set(HAVE_X509_H 0)
set(HAVE_ZLIB_H 0)
set(HAVE_SIZEOF_LONG_DOUBLE 1)
set(SIZEOF_LONG_DOUBLE 8)
set(HAVE_SOCKET 1)
set(HAVE_POLL 0)
set(HAVE_SELECT 1)
set(HAVE_STRDUP 1)
set(HAVE_STRSTR 1)
set(HAVE_STRTOK_R 0)
set(HAVE_STRFTIME 1)
set(HAVE_UNAME 0)
set(HAVE_STRCASECMP 0)
set(HAVE_STRICMP 1)
set(HAVE_STRCMPI 1)
set(HAVE_GETHOSTBYADDR 1)
set(HAVE_GETTIMEOFDAY 0)
set(HAVE_INET_ADDR 1)
set(HAVE_INET_NTOA 1)
set(HAVE_INET_NTOA_R 0)
set(HAVE_TCGETATTR 0)
set(HAVE_TCSETATTR 0)
set(HAVE_PERROR 1)
set(HAVE_CLOSESOCKET 1)
set(HAVE_SETVBUF 0)
set(HAVE_SIGSETJMP 0)
set(HAVE_GETPASS_R 0)
set(HAVE_STRLCAT 0)
set(HAVE_GETPWUID 0)
set(HAVE_GETEUID 0)
set(HAVE_UTIME 1)
set(HAVE_RAND_EGD 0)
set(HAVE_RAND_SCREEN 0)
set(HAVE_RAND_STATUS 0)
set(HAVE_GMTIME_R 0)
set(HAVE_LOCALTIME_R 0)
set(HAVE_GETHOSTBYADDR_R 0)
set(HAVE_GETHOSTBYNAME_R 0)
set(HAVE_SIGNAL_FUNC 1)
set(HAVE_SIGNAL_MACRO 0)
set(HAVE_GETHOSTBYADDR_R_5 0)
set(HAVE_GETHOSTBYADDR_R_5_REENTRANT 0)
set(HAVE_GETHOSTBYADDR_R_7 0)
set(HAVE_GETHOSTBYADDR_R_7_REENTRANT 0)
set(HAVE_GETHOSTBYADDR_R_8 0)
set(HAVE_GETHOSTBYADDR_R_8_REENTRANT 0)
set(HAVE_GETHOSTBYNAME_R_3 0)
set(HAVE_GETHOSTBYNAME_R_3_REENTRANT 0)
set(HAVE_GETHOSTBYNAME_R_5 0)
set(HAVE_GETHOSTBYNAME_R_5_REENTRANT 0)
set(HAVE_GETHOSTBYNAME_R_6 0)
set(HAVE_GETHOSTBYNAME_R_6_REENTRANT 0)
set(TIME_WITH_SYS_TIME 0)
set(HAVE_O_NONBLOCK 0)
set(HAVE_IN_ADDR_T 0)
set(HAVE_INET_NTOA_R_DECL 0)
set(HAVE_INET_NTOA_R_DECL_REENTRANT 0)
set(HAVE_GETADDRINFO 0)
set(STDC_HEADERS 1)
set(RETSIGTYPE_TEST 1)
set(HAVE_SIGACTION 0)
set(HAVE_MACRO_SIGSETJMP 0)
else(WIN32)
message("This file should be included on Windows platform only")
endif(WIN32)
endif(NOT UNIX)

View File

@@ -1,31 +0,0 @@
# File containing various utilities
# Converts a CMake list to a string containing elements separated by spaces
function(TO_LIST_SPACES _LIST_NAME OUTPUT_VAR)
set(NEW_LIST_SPACE)
foreach(ITEM ${${_LIST_NAME}})
set(NEW_LIST_SPACE "${NEW_LIST_SPACE} ${ITEM}")
endforeach()
string(STRIP ${NEW_LIST_SPACE} NEW_LIST_SPACE)
set(${OUTPUT_VAR} "${NEW_LIST_SPACE}" PARENT_SCOPE)
endfunction()
# Appends a list of items to a string which is a space-separated list, if they don't already exist.
function(LIST_SPACES_APPEND_ONCE LIST_NAME)
string(REPLACE " " ";" _LIST ${${LIST_NAME}})
list(APPEND _LIST ${ARGN})
list(REMOVE_DUPLICATES _LIST)
to_list_spaces(_LIST NEW_LIST_SPACE)
set(${LIST_NAME} "${NEW_LIST_SPACE}" PARENT_SCOPE)
endfunction()
# Convenience function that does the same as LIST(FIND ...) but with a TRUE/FALSE return value.
# Ex: IN_STR_LIST(MY_LIST "Searched item" WAS_FOUND)
function(IN_STR_LIST LIST_NAME ITEM_SEARCHED RETVAL)
list(FIND ${LIST_NAME} ${ITEM_SEARCHED} FIND_POS)
if(${FIND_POS} EQUAL -1)
set(${RETVAL} FALSE PARENT_SCOPE)
else()
set(${RETVAL} TRUE PARENT_SCOPE)
endif()
endfunction()
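
A short usage sketch for the three helpers above; the variable names are made up for illustration:

    set(MY_DEFS "-DFOO" "-DBAR")                  # a regular CMake list
    to_list_spaces(MY_DEFS MY_DEFS_STR)           # MY_DEFS_STR -> "-DFOO -DBAR"
    list_spaces_append_once(MY_DEFS_STR "-DBAR" "-DBAZ")
    # MY_DEFS_STR is now "-DFOO -DBAR -DBAZ"; the duplicate -DBAR was dropped
    in_str_list(MY_DEFS "-DFOO" HAS_FOO)          # HAS_FOO -> TRUE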

View File

@@ -1,864 +0,0 @@
# cURL/libcurl CMake script
# by Tetetest and Sukender (Benoit Neil)
# TODO:
# The output .so file lacks the soname number which we currently have within the lib/Makefile.am file
# Add full (4 or 5 libs) SSL support
# Add INSTALL target (EXTRA_DIST variables in Makefile.am may be moved to Makefile.inc so that CMake/CPack is aware of what's to include).
# Add CTests(?)
# Check on all possible platforms
# Test with as many configurations possible (With or without any option)
# Create scripts that help keep the CMake build system up to date (to reduce maintenance). According to Tetetest:
# - lists of headers that 'configure' checks for;
# - curl-specific tests (the ones that are in m4/curl-*.m4 files);
# - (most obvious thing:) curl version numbers.
# Add documentation subproject
#
# To check:
# (From Daniel Stenberg) The cmake build selected to run gcc with -fPIC on my box while the plain configure script did not.
# (From Daniel Stenberg) The gcc command line uses neither -g nor any -O options. As a developer, I also treasure our configure script's --enable-debug option that sets a long range of "picky" compiler options.
cmake_minimum_required(VERSION 2.6.2 FATAL_ERROR)
set(CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMake;${CMAKE_MODULE_PATH}")
include(Utilities)
project( CURL C )
file (READ ${CURL_SOURCE_DIR}/include/curl/curlver.h CURL_VERSION_H_CONTENTS)
string (REGEX MATCH "LIBCURL_VERSION_MAJOR[ \t]+([0-9]+)"
LIBCURL_VERSION_MJ ${CURL_VERSION_H_CONTENTS})
string (REGEX MATCH "([0-9]+)"
LIBCURL_VERSION_MJ ${LIBCURL_VERSION_MJ})
string (REGEX MATCH
"LIBCURL_VERSION_MINOR[ \t]+([0-9]+)"
LIBCURL_VERSION_MI ${CURL_VERSION_H_CONTENTS})
string (REGEX MATCH "([0-9]+)" LIBCURL_VERSION_MI ${LIBCURL_VERSION_MI})
string (REGEX MATCH
"LIBCURL_VERSION_PATCH[ \t]+([0-9]+)"
LIBCURL_VERSION_PT ${CURL_VERSION_H_CONTENTS})
string (REGEX MATCH "([0-9]+)" LIBCURL_VERSION_PT ${LIBCURL_VERSION_PT})
set (CURL_MAJOR_VERSION ${LIBCURL_VERSION_MJ})
set (CURL_MINOR_VERSION ${LIBCURL_VERSION_MI})
set (CURL_PATCH_VERSION ${LIBCURL_VERSION_PT})
include_regular_expression("^.*$") # Sukender: Is it necessary?
# Setup package meta-data
# SET(PACKAGE "curl")
set(CURL_VERSION ${CURL_MAJOR_VERSION}.${CURL_MINOR_VERSION}.${CURL_PATCH_VERSION})
message(STATUS "curl version=[${CURL_VERSION}]")
# SET(PACKAGE_TARNAME "curl")
# SET(PACKAGE_NAME "curl")
# SET(PACKAGE_VERSION "-")
# SET(PACKAGE_STRING "curl-")
# SET(PACKAGE_BUGREPORT "a suitable curl mailing list => http://curl.haxx.se/mail/")
set(OPERATING_SYSTEM "${CMAKE_SYSTEM_NAME}")
set(OS "\"${CMAKE_SYSTEM_NAME}\"")
include_directories(${PROJECT_BINARY_DIR}/include/curl)
include_directories( ${CURL_SOURCE_DIR}/include )
option(BUILD_CURL_EXE "Set to ON to build cURL executable." ON)
option(BUILD_CURL_TESTS "Set to ON to build cURL tests." ON)
option(CURL_STATICLIB "Set to ON to build libcurl with static linking." OFF)
option(CURL_USE_ARES "Set to ON to enable c-ares support" OFF)
# initialize CURL_LIBS
set(CURL_LIBS "")
if(CURL_USE_ARES)
set(USE_ARES ${CURL_USE_ARES})
find_package(CARES REQUIRED)
list(APPEND CURL_LIBS ${CARES_LIBRARY} )
set(CURL_LIBS ${CURL_LIBS} ${CARES_LIBRARY})
endif()
option(BUILD_DASHBOARD_REPORTS "Set to ON to activate reporting of cURL builds here http://www.cdash.org/CDashPublic/index.php?project=CURL" OFF)
if(BUILD_DASHBOARD_REPORTS)
#INCLUDE(Dart)
include(CTest)
endif(BUILD_DASHBOARD_REPORTS)
if(MSVC)
option(BUILD_RELEASE_DEBUG_DIRS "Set OFF to build each configuration to a separate directory" OFF)
mark_as_advanced(BUILD_RELEASE_DEBUG_DIRS)
endif()
option(CURL_HIDDEN_SYMBOLS "Set to ON to hide libcurl internal symbols (=hide all symbols that aren't officially external)." ON)
mark_as_advanced(CURL_HIDDEN_SYMBOLS)
# IF(WIN32)
# OPTION(CURL_WINDOWS_SSPI "Use windows libraries to allow NTLM authentication without openssl" ON)
# MARK_AS_ADVANCED(CURL_WINDOWS_SSPI)
# ENDIF()
option(HTTP_ONLY "disables all protocols except HTTP (This overrides all CURL_DISABLE_* options)" OFF)
mark_as_advanced(HTTP_ONLY)
option(CURL_DISABLE_FTP "disables FTP" OFF)
mark_as_advanced(CURL_DISABLE_FTP)
option(CURL_DISABLE_LDAP "disables LDAP" OFF)
mark_as_advanced(CURL_DISABLE_LDAP)
option(CURL_DISABLE_TELNET "disables Telnet" OFF)
mark_as_advanced(CURL_DISABLE_TELNET)
option(CURL_DISABLE_DICT "disables DICT" OFF)
mark_as_advanced(CURL_DISABLE_DICT)
option(CURL_DISABLE_FILE "disables FILE" OFF)
mark_as_advanced(CURL_DISABLE_FILE)
option(CURL_DISABLE_TFTP "disables TFTP" OFF)
mark_as_advanced(CURL_DISABLE_TFTP)
option(CURL_DISABLE_HTTP "disables HTTP" OFF)
mark_as_advanced(CURL_DISABLE_HTTP)
option(CURL_DISABLE_LDAPS "to disable LDAPS" OFF)
mark_as_advanced(CURL_DISABLE_LDAPS)
if(HTTP_ONLY)
set(CURL_DISABLE_FTP ON)
set(CURL_DISABLE_LDAP ON)
set(CURL_DISABLE_LDAPS ON)
set(CURL_DISABLE_TELNET ON)
set(CURL_DISABLE_DICT ON)
set(CURL_DISABLE_FILE ON)
set(CURL_DISABLE_TFTP ON)
endif()
option(CURL_DISABLE_COOKIES "to disable cookies support" OFF)
mark_as_advanced(CURL_DISABLE_COOKIES)
option(CURL_DISABLE_CRYPTO_AUTH "to disable cryptographic authentication" OFF)
mark_as_advanced(CURL_DISABLE_CRYPTO_AUTH)
option(CURL_DISABLE_VERBOSE_STRINGS "to disable verbose strings" OFF)
mark_as_advanced(CURL_DISABLE_VERBOSE_STRINGS)
option(DISABLED_THREADSAFE "Set to explicitly specify we don't want to use thread-safe functions" OFF)
mark_as_advanced(DISABLED_THREADSAFE)
option(ENABLE_IPV6 "Define if you want to enable IPv6 support" OFF)
mark_as_advanced(ENABLE_IPV6)
# We need ansi c-flags, especially on HP
set(CMAKE_C_FLAGS "${CMAKE_ANSI_CFLAGS} ${CMAKE_C_FLAGS}")
set(CMAKE_REQUIRED_FLAGS ${CMAKE_ANSI_CFLAGS})
# Disable warnings on Borland to avoid changing 3rd party code.
if(BORLAND)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w-")
endif(BORLAND)
# If we are on AIX, do the _ALL_SOURCE magic
if(${CMAKE_SYSTEM_NAME} MATCHES AIX)
set(_ALL_SOURCE 1)
endif(${CMAKE_SYSTEM_NAME} MATCHES AIX)
# Include all the necessary files for macros
include (CheckFunctionExists)
include (CheckIncludeFile)
include (CheckIncludeFiles)
include (CheckLibraryExists)
include (CheckSymbolExists)
include (CheckTypeSize)
# On windows preload settings
if(WIN32)
include(${CMAKE_CURRENT_SOURCE_DIR}/CMake/Platforms/WindowsCache.cmake)
endif(WIN32)
# This macro checks if the symbol exists in the library and if it
# does, it prepends library to the list.
macro(CHECK_LIBRARY_EXISTS_CONCAT LIBRARY SYMBOL VARIABLE)
check_library_exists("${LIBRARY};${CURL_LIBS}" ${SYMBOL} "${CMAKE_LIBRARY_PATH}"
${VARIABLE})
if(${VARIABLE})
set(CURL_LIBS ${LIBRARY} ${CURL_LIBS})
endif(${VARIABLE})
endmacro(CHECK_LIBRARY_EXISTS_CONCAT)
# Check for all needed libraries
check_library_exists_concat("dl" dlopen HAVE_LIBDL)
check_library_exists_concat("socket" connect HAVE_LIBSOCKET)
check_library_exists("c" gethostbyname "" NOT_NEED_LIBNSL)
# Yellowtab Zeta needs different libraries than BeOS 5.
if(BEOS)
set(NOT_NEED_LIBNSL 1)
check_library_exists_concat("bind" gethostbyname HAVE_LIBBIND)
check_library_exists_concat("bnetapi" closesocket HAVE_LIBBNETAPI)
endif(BEOS)
if(NOT NOT_NEED_LIBNSL)
check_library_exists_concat("nsl" gethostbyname HAVE_LIBNSL)
endif(NOT NOT_NEED_LIBNSL)
check_library_exists_concat("ws2_32" getch HAVE_LIBWS2_32)
check_library_exists_concat("winmm" getch HAVE_LIBWINMM)
check_library_exists("wldap32" cldap_open "" HAVE_WLDAP32)
if(WIN32)
set(CURL_DEFAULT_DISABLE_LDAP OFF)
# some windows compilers do not have wldap32
if(NOT HAVE_WLDAP32)
set(CURL_DISABLE_LDAP ON CACHE BOOL "" FORCE)
message(STATUS "wldap32 not found CURL_DISABLE_LDAP set ON")
option(CURL_LDAP_WIN "Use Windows LDAP implementation" OFF)
else()
option(CURL_LDAP_WIN "Use Windows LDAP implementation" ON)
endif()
mark_as_advanced(CURL_LDAP_WIN)
endif()
# IF(NOT CURL_SPECIAL_LIBZ)
# CHECK_LIBRARY_EXISTS_CONCAT("z" inflateEnd HAVE_LIBZ)
# ENDIF(NOT CURL_SPECIAL_LIBZ)
# Check for idn
check_library_exists_concat("idn" idna_to_ascii_lz HAVE_LIBIDN)
# Check for LDAP
check_library_exists_concat("ldap" ldap_init HAVE_LIBLDAP)
# if(NOT HAVE_LIBLDAP)
# SET(CURL_DISABLE_LDAP ON)
# endif(NOT HAVE_LIBLDAP)
# Check for symbol dlopen (same as HAVE_LIBDL)
check_library_exists("${CURL_LIBS}" dlopen "" HAVE_DLOPEN)
# For other tests to use the same libraries
set(CMAKE_REQUIRED_LIBRARIES ${CURL_LIBS})
option(CURL_ZLIB "Set to ON to enable building cURL with zlib support." ON)
set(HAVE_LIBZ OFF)
set(HAVE_ZLIB_H OFF)
set(HAVE_ZLIB OFF)
if(CURL_ZLIB) # AND CURL_CONFIG_HAS_BEEN_RUN_BEFORE
find_package(ZLIB QUIET)
if(ZLIB_FOUND)
set(HAVE_ZLIB_H ON)
set(HAVE_ZLIB ON)
set(HAVE_LIBZ ON)
list(APPEND CURL_LIBS ${ZLIB_LIBRARIES})
endif()
endif()
option(CMAKE_USE_OPENSSL "Use OpenSSL code. Experimental" ON)
mark_as_advanced(CMAKE_USE_OPENSSL)
if(CMAKE_USE_OPENSSL)
set(USE_SSLEAY OFF)
set(USE_OPENSSL OFF)
set(HAVE_LIBCRYPTO OFF)
set(HAVE_LIBSSL OFF)
find_package(OpenSSL)
if(OPENSSL_FOUND)
list(APPEND CURL_LIBS ${OPENSSL_LIBRARIES})
list(APPEND CMAKE_REQUIRED_INCLUDES ${OPENSSL_INCLUDE_DIR})
set(USE_SSLEAY ON)
set(USE_OPENSSL ON)
set(HAVE_LIBCRYPTO ON)
set(HAVE_LIBSSL ON)
endif(OPENSSL_FOUND)
endif(CMAKE_USE_OPENSSL)
# If we have features.h, then do the _BSD_SOURCE magic
check_include_file("features.h" HAVE_FEATURES_H)
# Check if header file exists and add it to the list.
macro(CHECK_INCLUDE_FILE_CONCAT FILE VARIABLE)
check_include_files("${CURL_INCLUDES};${FILE}" ${VARIABLE})
if(${VARIABLE})
set(CURL_INCLUDES ${CURL_INCLUDES} ${FILE})
set(CURL_TEST_DEFINES "${CURL_TEST_DEFINES} -D${VARIABLE}")
endif(${VARIABLE})
endmacro(CHECK_INCLUDE_FILE_CONCAT)
# Check for header files
if(NOT UNIX)
check_include_file_concat("ws2tcpip.h" HAVE_WS2TCPIP_H)
check_include_file_concat("winsock2.h" HAVE_WINSOCK2_H)
endif(NOT UNIX)
check_include_file_concat("stdio.h" HAVE_STDIO_H)
if(NOT UNIX)
check_include_file_concat("windows.h" HAVE_WINDOWS_H)
check_include_file_concat("winsock.h" HAVE_WINSOCK_H)
endif(NOT UNIX)
check_include_file_concat("inttypes.h" HAVE_INTTYPES_H)
check_include_file_concat("sys/filio.h" HAVE_SYS_FILIO_H)
check_include_file_concat("sys/ioctl.h" HAVE_SYS_IOCTL_H)
check_include_file_concat("sys/param.h" HAVE_SYS_PARAM_H)
check_include_file_concat("sys/poll.h" HAVE_SYS_POLL_H)
check_include_file_concat("sys/resource.h" HAVE_SYS_RESOURCE_H)
check_include_file_concat("sys/select.h" HAVE_SYS_SELECT_H)
check_include_file_concat("sys/socket.h" HAVE_SYS_SOCKET_H)
check_include_file_concat("sys/sockio.h" HAVE_SYS_SOCKIO_H)
check_include_file_concat("sys/stat.h" HAVE_SYS_STAT_H)
check_include_file_concat("sys/time.h" HAVE_SYS_TIME_H)
check_include_file_concat("sys/types.h" HAVE_SYS_TYPES_H)
check_include_file_concat("sys/uio.h" HAVE_SYS_UIO_H)
check_include_file_concat("sys/un.h" HAVE_SYS_UN_H)
check_include_file_concat("sys/utime.h" HAVE_SYS_UTIME_H)
check_include_file_concat("alloca.h" HAVE_ALLOCA_H)
check_include_file_concat("arpa/inet.h" HAVE_ARPA_INET_H)
check_include_file_concat("arpa/tftp.h" HAVE_ARPA_TFTP_H)
check_include_file_concat("assert.h" HAVE_ASSERT_H)
check_include_file_concat("crypto.h" HAVE_CRYPTO_H)
check_include_file_concat("des.h" HAVE_DES_H)
check_include_file_concat("err.h" HAVE_ERR_H)
check_include_file_concat("errno.h" HAVE_ERRNO_H)
check_include_file_concat("fcntl.h" HAVE_FCNTL_H)
check_include_file_concat("gssapi/gssapi.h" HAVE_GSSAPI_GSSAPI_H)
check_include_file_concat("gssapi/gssapi_generic.h" HAVE_GSSAPI_GSSAPI_GENERIC_H)
check_include_file_concat("gssapi/gssapi_krb5.h" HAVE_GSSAPI_GSSAPI_KRB5_H)
check_include_file_concat("idn-free.h" HAVE_IDN_FREE_H)
check_include_file_concat("ifaddrs.h" HAVE_IFADDRS_H)
check_include_file_concat("io.h" HAVE_IO_H)
check_include_file_concat("krb.h" HAVE_KRB_H)
check_include_file_concat("libgen.h" HAVE_LIBGEN_H)
check_include_file_concat("libssh2.h" HAVE_LIBSSH2_H)
check_include_file_concat("limits.h" HAVE_LIMITS_H)
check_include_file_concat("locale.h" HAVE_LOCALE_H)
check_include_file_concat("net/if.h" HAVE_NET_IF_H)
check_include_file_concat("netdb.h" HAVE_NETDB_H)
check_include_file_concat("netinet/in.h" HAVE_NETINET_IN_H)
check_include_file_concat("netinet/tcp.h" HAVE_NETINET_TCP_H)
if(CMAKE_USE_OPENSSL AND OPENSSL_FOUND)
check_include_file_concat("openssl/crypto.h" HAVE_OPENSSL_CRYPTO_H)
check_include_file_concat("openssl/engine.h" HAVE_OPENSSL_ENGINE_H)
check_include_file_concat("openssl/err.h" HAVE_OPENSSL_ERR_H)
check_include_file_concat("openssl/pem.h" HAVE_OPENSSL_PEM_H)
check_include_file_concat("openssl/pkcs12.h" HAVE_OPENSSL_PKCS12_H)
check_include_file_concat("openssl/rsa.h" HAVE_OPENSSL_RSA_H)
check_include_file_concat("openssl/ssl.h" HAVE_OPENSSL_SSL_H)
check_include_file_concat("openssl/x509.h" HAVE_OPENSSL_X509_H)
check_include_file_concat("openssl/rand.h" HAVE_OPENSSL_RAND_H)
endif(CMAKE_USE_OPENSSL AND OPENSSL_FOUND)
check_include_file_concat("pem.h" HAVE_PEM_H)
check_include_file_concat("poll.h" HAVE_POLL_H)
check_include_file_concat("pwd.h" HAVE_PWD_H)
check_include_file_concat("rsa.h" HAVE_RSA_H)
check_include_file_concat("setjmp.h" HAVE_SETJMP_H)
check_include_file_concat("sgtty.h" HAVE_SGTTY_H)
check_include_file_concat("signal.h" HAVE_SIGNAL_H)
check_include_file_concat("ssl.h" HAVE_SSL_H)
check_include_file_concat("stdbool.h" HAVE_STDBOOL_H)
check_include_file_concat("stdint.h" HAVE_STDINT_H)
check_include_file_concat("stdio.h" HAVE_STDIO_H)
check_include_file_concat("stdlib.h" HAVE_STDLIB_H)
check_include_file_concat("string.h" HAVE_STRING_H)
check_include_file_concat("strings.h" HAVE_STRINGS_H)
check_include_file_concat("stropts.h" HAVE_STROPTS_H)
check_include_file_concat("termio.h" HAVE_TERMIO_H)
check_include_file_concat("termios.h" HAVE_TERMIOS_H)
check_include_file_concat("time.h" HAVE_TIME_H)
check_include_file_concat("tld.h" HAVE_TLD_H)
check_include_file_concat("unistd.h" HAVE_UNISTD_H)
check_include_file_concat("utime.h" HAVE_UTIME_H)
check_include_file_concat("x509.h" HAVE_X509_H)
check_include_file_concat("process.h" HAVE_PROCESS_H)
check_include_file_concat("stddef.h" HAVE_STDDEF_H)
check_include_file_concat("dlfcn.h" HAVE_DLFCN_H)
check_include_file_concat("malloc.h" HAVE_MALLOC_H)
check_include_file_concat("memory.h" HAVE_MEMORY_H)
check_include_file_concat("ldap.h" HAVE_LDAP_H)
check_include_file_concat("netinet/if_ether.h" HAVE_NETINET_IF_ETHER_H)
check_include_file_concat("stdint.h" HAVE_STDINT_H)
check_include_file_concat("sockio.h" HAVE_SOCKIO_H)
check_include_file_concat("sys/utsname.h" HAVE_SYS_UTSNAME_H)
check_include_file_concat("idna.h" HAVE_IDNA_H)
if(NOT HAVE_LDAP_H)
message(STATUS "LDAP_H not found CURL_DISABLE_LDAP set ON")
set(CURL_DISABLE_LDAP ON CACHE BOOL "" FORCE)
endif()
# No ldap, no ldaps.
if(CURL_DISABLE_LDAP)
if(NOT CURL_DISABLE_LDAPS)
message(STATUS "LDAP needs to be enabled to support LDAPS")
set(CURL_DISABLE_LDAPS ON CACHE BOOL "" FORCE)
endif()
endif()
check_type_size(size_t SIZEOF_SIZE_T)
check_type_size(ssize_t SIZEOF_SSIZE_T)
check_type_size("long long" SIZEOF_LONG_LONG)
check_type_size("long" SIZEOF_LONG)
check_type_size("short" SIZEOF_SHORT)
check_type_size("int" SIZEOF_INT)
check_type_size("__int64" SIZEOF___INT64)
check_type_size("long double" SIZEOF_LONG_DOUBLE)
check_type_size("time_t" SIZEOF_TIME_T)
if(NOT HAVE_SIZEOF_SSIZE_T)
if(SIZEOF_LONG EQUAL SIZEOF_SIZE_T)
set(ssize_t long)
endif(SIZEOF_LONG EQUAL SIZEOF_SIZE_T)
if(NOT ssize_t AND SIZEOF___INT64 EQUAL SIZEOF_SIZE_T)
set(ssize_t __int64)
endif(NOT ssize_t AND SIZEOF___INT64 EQUAL SIZEOF_SIZE_T)
endif(NOT HAVE_SIZEOF_SSIZE_T)
# Different sizeofs, etc.
# define CURL_SIZEOF_LONG 4
# define CURL_TYPEOF_CURL_OFF_T long long
# define CURL_FORMAT_CURL_OFF_T "lld"
# define CURL_FORMAT_CURL_OFF_TU "llu"
# define CURL_FORMAT_OFF_T "%lld"
# define CURL_SIZEOF_CURL_OFF_T 8
# define CURL_SUFFIX_CURL_OFF_T LL
# define CURL_SUFFIX_CURL_OFF_TU ULL
set(CURL_SIZEOF_LONG ${SIZEOF_LONG})
if(SIZEOF_LONG EQUAL 8)
set(CURL_TYPEOF_CURL_OFF_T long)
set(CURL_SIZEOF_CURL_OFF_T 8)
set(CURL_FORMAT_CURL_OFF_T "ld")
set(CURL_FORMAT_CURL_OFF_TU "lu")
set(CURL_FORMAT_OFF_T "%ld")
set(CURL_SUFFIX_CURL_OFF_T L)
set(CURL_SUFFIX_CURL_OFF_TU UL)
endif(SIZEOF_LONG EQUAL 8)
if(SIZEOF_LONG_LONG EQUAL 8)
set(CURL_TYPEOF_CURL_OFF_T "long long")
set(CURL_SIZEOF_CURL_OFF_T 8)
set(CURL_FORMAT_CURL_OFF_T "lld")
set(CURL_FORMAT_CURL_OFF_TU "llu")
set(CURL_FORMAT_OFF_T "%lld")
set(CURL_SUFFIX_CURL_OFF_T LL)
set(CURL_SUFFIX_CURL_OFF_TU ULL)
endif(SIZEOF_LONG_LONG EQUAL 8)
if(NOT CURL_TYPEOF_CURL_OFF_T)
set(CURL_TYPEOF_CURL_OFF_T ${ssize_t})
set(CURL_SIZEOF_CURL_OFF_T ${SIZEOF_SSIZE_T})
# TODO: need adjustment here.
set(CURL_FORMAT_CURL_OFF_T "ld")
set(CURL_FORMAT_CURL_OFF_TU "lu")
set(CURL_FORMAT_OFF_T "%ld")
set(CURL_SUFFIX_CURL_OFF_T L)
set(CURL_SUFFIX_CURL_OFF_TU LU)
endif(NOT CURL_TYPEOF_CURL_OFF_T)
if(HAVE_SIZEOF_LONG_LONG)
set(HAVE_LONGLONG 1)
set(HAVE_LL 1)
endif(HAVE_SIZEOF_LONG_LONG)
find_file(RANDOM_FILE urandom /dev)
mark_as_advanced(RANDOM_FILE)
# Check for some functions that are used
check_symbol_exists(basename "${CURL_INCLUDES}" HAVE_BASENAME)
check_symbol_exists(socket "${CURL_INCLUDES}" HAVE_SOCKET)
check_symbol_exists(poll "${CURL_INCLUDES}" HAVE_POLL)
check_symbol_exists(select "${CURL_INCLUDES}" HAVE_SELECT)
check_symbol_exists(strdup "${CURL_INCLUDES}" HAVE_STRDUP)
check_symbol_exists(strstr "${CURL_INCLUDES}" HAVE_STRSTR)
check_symbol_exists(strtok_r "${CURL_INCLUDES}" HAVE_STRTOK_R)
check_symbol_exists(strftime "${CURL_INCLUDES}" HAVE_STRFTIME)
check_symbol_exists(uname "${CURL_INCLUDES}" HAVE_UNAME)
check_symbol_exists(strcasecmp "${CURL_INCLUDES}" HAVE_STRCASECMP)
check_symbol_exists(stricmp "${CURL_INCLUDES}" HAVE_STRICMP)
check_symbol_exists(strcmpi "${CURL_INCLUDES}" HAVE_STRCMPI)
check_symbol_exists(strncmpi "${CURL_INCLUDES}" HAVE_STRNCMPI)
check_symbol_exists(alarm "${CURL_INCLUDES}" HAVE_ALARM)
if(NOT HAVE_STRNCMPI)
set(HAVE_STRCMPI)
endif(NOT HAVE_STRNCMPI)
check_symbol_exists(gethostbyaddr "${CURL_INCLUDES}" HAVE_GETHOSTBYADDR)
check_symbol_exists(gethostbyaddr_r "${CURL_INCLUDES}" HAVE_GETHOSTBYADDR_R)
check_symbol_exists(gettimeofday "${CURL_INCLUDES}" HAVE_GETTIMEOFDAY)
check_symbol_exists(inet_addr "${CURL_INCLUDES}" HAVE_INET_ADDR)
check_symbol_exists(inet_ntoa "${CURL_INCLUDES}" HAVE_INET_NTOA)
check_symbol_exists(inet_ntoa_r "${CURL_INCLUDES}" HAVE_INET_NTOA_R)
check_symbol_exists(tcsetattr "${CURL_INCLUDES}" HAVE_TCSETATTR)
check_symbol_exists(tcgetattr "${CURL_INCLUDES}" HAVE_TCGETATTR)
check_symbol_exists(perror "${CURL_INCLUDES}" HAVE_PERROR)
check_symbol_exists(closesocket "${CURL_INCLUDES}" HAVE_CLOSESOCKET)
check_symbol_exists(setvbuf "${CURL_INCLUDES}" HAVE_SETVBUF)
check_symbol_exists(sigsetjmp "${CURL_INCLUDES}" HAVE_SIGSETJMP)
check_symbol_exists(getpass_r "${CURL_INCLUDES}" HAVE_GETPASS_R)
check_symbol_exists(strlcat "${CURL_INCLUDES}" HAVE_STRLCAT)
check_symbol_exists(getpwuid "${CURL_INCLUDES}" HAVE_GETPWUID)
check_symbol_exists(geteuid "${CURL_INCLUDES}" HAVE_GETEUID)
check_symbol_exists(utime "${CURL_INCLUDES}" HAVE_UTIME)
if(CMAKE_USE_OPENSSL)
check_symbol_exists(RAND_status "${CURL_INCLUDES}" HAVE_RAND_STATUS)
check_symbol_exists(RAND_screen "${CURL_INCLUDES}" HAVE_RAND_SCREEN)
check_symbol_exists(RAND_egd "${CURL_INCLUDES}" HAVE_RAND_EGD)
check_symbol_exists(CRYPTO_cleanup_all_ex_data "${CURL_INCLUDES}"
HAVE_CRYPTO_CLEANUP_ALL_EX_DATA)
if(HAVE_LIBCRYPTO AND HAVE_LIBSSL)
set(USE_OPENSSL 1)
set(USE_SSLEAY 1)
endif(HAVE_LIBCRYPTO AND HAVE_LIBSSL)
endif(CMAKE_USE_OPENSSL)
check_symbol_exists(gmtime_r "${CURL_INCLUDES}" HAVE_GMTIME_R)
check_symbol_exists(localtime_r "${CURL_INCLUDES}" HAVE_LOCALTIME_R)
check_symbol_exists(gethostbyname "${CURL_INCLUDES}" HAVE_GETHOSTBYNAME)
check_symbol_exists(gethostbyname_r "${CURL_INCLUDES}" HAVE_GETHOSTBYNAME_R)
check_symbol_exists(signal "${CURL_INCLUDES}" HAVE_SIGNAL_FUNC)
check_symbol_exists(SIGALRM "${CURL_INCLUDES}" HAVE_SIGNAL_MACRO)
if(HAVE_SIGNAL_FUNC AND HAVE_SIGNAL_MACRO)
set(HAVE_SIGNAL 1)
endif(HAVE_SIGNAL_FUNC AND HAVE_SIGNAL_MACRO)
check_symbol_exists(uname "${CURL_INCLUDES}" HAVE_UNAME)
check_symbol_exists(strtoll "${CURL_INCLUDES}" HAVE_STRTOLL)
check_symbol_exists(_strtoi64 "${CURL_INCLUDES}" HAVE__STRTOI64)
check_symbol_exists(strerror_r "${CURL_INCLUDES}" HAVE_STRERROR_R)
check_symbol_exists(siginterrupt "${CURL_INCLUDES}" HAVE_SIGINTERRUPT)
check_symbol_exists(perror "${CURL_INCLUDES}" HAVE_PERROR)
check_symbol_exists(fork "${CURL_INCLUDES}" HAVE_FORK)
check_symbol_exists(freeaddrinfo "${CURL_INCLUDES}" HAVE_FREEADDRINFO)
check_symbol_exists(freeifaddrs "${CURL_INCLUDES}" HAVE_FREEIFADDRS)
check_symbol_exists(pipe "${CURL_INCLUDES}" HAVE_PIPE)
check_symbol_exists(ftruncate "${CURL_INCLUDES}" HAVE_FTRUNCATE)
check_symbol_exists(getprotobyname "${CURL_INCLUDES}" HAVE_GETPROTOBYNAME)
check_symbol_exists(getrlimit "${CURL_INCLUDES}" HAVE_GETRLIMIT)
check_symbol_exists(idn_free "${CURL_INCLUDES}" HAVE_IDN_FREE)
check_symbol_exists(idna_strerror "${CURL_INCLUDES}" HAVE_IDNA_STRERROR)
check_symbol_exists(tld_strerror "${CURL_INCLUDES}" HAVE_TLD_STRERROR)
check_symbol_exists(setlocale "${CURL_INCLUDES}" HAVE_SETLOCALE)
check_symbol_exists(setrlimit "${CURL_INCLUDES}" HAVE_SETRLIMIT)
check_symbol_exists(fcntl "${CURL_INCLUDES}" HAVE_FCNTL)
check_symbol_exists(ioctl "${CURL_INCLUDES}" HAVE_IOCTL)
check_symbol_exists(setsockopt "${CURL_INCLUDES}" HAVE_SETSOCKOPT)
# symbol exists in win32, but function does not.
check_function_exists(inet_pton HAVE_INET_PTON)
# sigaction and sigsetjmp are special. Use special mechanism for
# detecting those, but only if previous attempt failed.
if(HAVE_SIGNAL_H)
check_symbol_exists(sigaction "signal.h" HAVE_SIGACTION)
endif(HAVE_SIGNAL_H)
if(NOT HAVE_SIGSETJMP)
if(HAVE_SETJMP_H)
check_symbol_exists(sigsetjmp "setjmp.h" HAVE_MACRO_SIGSETJMP)
if(HAVE_MACRO_SIGSETJMP)
set(HAVE_SIGSETJMP 1)
endif(HAVE_MACRO_SIGSETJMP)
endif(HAVE_SETJMP_H)
endif(NOT HAVE_SIGSETJMP)
# If there is no stricmp(), do not allow LDAP to parse URLs
if(NOT HAVE_STRICMP)
set(HAVE_LDAP_URL_PARSE 1)
endif(NOT HAVE_STRICMP)
# For other curl-specific tests, use this macro.
macro(CURL_INTERNAL_TEST CURL_TEST)
if("${CURL_TEST}" MATCHES "^${CURL_TEST}$")
set(MACRO_CHECK_FUNCTION_DEFINITIONS
"-D${CURL_TEST} ${CURL_TEST_DEFINES} ${CMAKE_REQUIRED_FLAGS}")
if(CMAKE_REQUIRED_LIBRARIES)
set(CURL_TEST_ADD_LIBRARIES
"-DLINK_LIBRARIES:STRING=${CMAKE_REQUIRED_LIBRARIES}")
endif(CMAKE_REQUIRED_LIBRARIES)
message(STATUS "Performing Curl Test ${CURL_TEST}")
try_compile(${CURL_TEST}
${CMAKE_BINARY_DIR}
${CMAKE_CURRENT_SOURCE_DIR}/CMake/CurlTests.c
CMAKE_FLAGS -DCOMPILE_DEFINITIONS:STRING=${MACRO_CHECK_FUNCTION_DEFINITIONS}
"${CURL_TEST_ADD_LIBRARIES}"
OUTPUT_VARIABLE OUTPUT)
if(${CURL_TEST})
set(${CURL_TEST} 1 CACHE INTERNAL "Curl test ${FUNCTION}")
message(STATUS "Performing Curl Test ${CURL_TEST} - Success")
file(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeOutput.log
"Performing Curl Test ${CURL_TEST} passed with the following output:\n"
"${OUTPUT}\n")
else(${CURL_TEST})
message(STATUS "Performing Curl Test ${CURL_TEST} - Failed")
set(${CURL_TEST} "" CACHE INTERNAL "Curl test ${FUNCTION}")
file(APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log
"Performing Curl Test ${CURL_TEST} failed with the following output:\n"
"${OUTPUT}\n")
endif(${CURL_TEST})
endif("${CURL_TEST}" MATCHES "^${CURL_TEST}$")
endmacro(CURL_INTERNAL_TEST)
macro(CURL_INTERNAL_TEST_RUN CURL_TEST)
if("${CURL_TEST}_COMPILE" MATCHES "^${CURL_TEST}_COMPILE$")
set(MACRO_CHECK_FUNCTION_DEFINITIONS
"-D${CURL_TEST} ${CMAKE_REQUIRED_FLAGS}")
if(CMAKE_REQUIRED_LIBRARIES)
set(CURL_TEST_ADD_LIBRARIES
"-DLINK_LIBRARIES:STRING=${CMAKE_REQUIRED_LIBRARIES}")
endif(CMAKE_REQUIRED_LIBRARIES)
message(STATUS "Performing Curl Test ${CURL_TEST}")
try_run(${CURL_TEST} ${CURL_TEST}_COMPILE
${CMAKE_BINARY_DIR}
${CMAKE_CURRENT_SOURCE_DIR}/CMake/CurlTests.c
CMAKE_FLAGS -DCOMPILE_DEFINITIONS:STRING=${MACRO_CHECK_FUNCTION_DEFINITIONS}
"${CURL_TEST_ADD_LIBRARIES}"
OUTPUT_VARIABLE OUTPUT)
if(${CURL_TEST}_COMPILE AND NOT ${CURL_TEST})
set(${CURL_TEST} 1 CACHE INTERNAL "Curl test ${FUNCTION}")
message(STATUS "Performing Curl Test ${CURL_TEST} - Success")
else(${CURL_TEST}_COMPILE AND NOT ${CURL_TEST})
message(STATUS "Performing Curl Test ${CURL_TEST} - Failed")
set(${CURL_TEST} "" CACHE INTERNAL "Curl test ${FUNCTION}")
file(APPEND "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log"
"Performing Curl Test ${CURL_TEST} failed with the following output:\n"
"${OUTPUT}")
if(${CURL_TEST}_COMPILE)
file(APPEND
"${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log"
"There was a problem running this test\n")
endif(${CURL_TEST}_COMPILE)
file(APPEND "${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log"
"\n\n")
endif(${CURL_TEST}_COMPILE AND NOT ${CURL_TEST})
endif("${CURL_TEST}_COMPILE" MATCHES "^${CURL_TEST}_COMPILE$")
endmacro(CURL_INTERNAL_TEST_RUN)
# Do curl specific tests
foreach(CURL_TEST
HAVE_FCNTL_O_NONBLOCK
HAVE_IOCTLSOCKET
HAVE_IOCTLSOCKET_CAMEL
HAVE_IOCTLSOCKET_CAMEL_FIONBIO
HAVE_IOCTLSOCKET_FIONBIO
HAVE_IOCTL_FIONBIO
HAVE_IOCTL_SIOCGIFADDR
HAVE_SETSOCKOPT_SO_NONBLOCK
HAVE_SOCKADDR_IN6_SIN6_SCOPE_ID
TIME_WITH_SYS_TIME
HAVE_O_NONBLOCK
HAVE_GETHOSTBYADDR_R_5
HAVE_GETHOSTBYADDR_R_7
HAVE_GETHOSTBYADDR_R_8
HAVE_GETHOSTBYADDR_R_5_REENTRANT
HAVE_GETHOSTBYADDR_R_7_REENTRANT
HAVE_GETHOSTBYADDR_R_8_REENTRANT
HAVE_GETHOSTBYNAME_R_3
HAVE_GETHOSTBYNAME_R_5
HAVE_GETHOSTBYNAME_R_6
HAVE_GETHOSTBYNAME_R_3_REENTRANT
HAVE_GETHOSTBYNAME_R_5_REENTRANT
HAVE_GETHOSTBYNAME_R_6_REENTRANT
HAVE_SOCKLEN_T
HAVE_IN_ADDR_T
HAVE_BOOL_T
STDC_HEADERS
RETSIGTYPE_TEST
HAVE_INET_NTOA_R_DECL
HAVE_INET_NTOA_R_DECL_REENTRANT
HAVE_GETADDRINFO
HAVE_FILE_OFFSET_BITS
)
curl_internal_test(${CURL_TEST})
endforeach(CURL_TEST)
if(HAVE_FILE_OFFSET_BITS)
set(_FILE_OFFSET_BITS 64)
endif(HAVE_FILE_OFFSET_BITS)
foreach(CURL_TEST
HAVE_GLIBC_STRERROR_R
HAVE_POSIX_STRERROR_R
)
curl_internal_test_run(${CURL_TEST})
endforeach(CURL_TEST)
# Check for reentrant
foreach(CURL_TEST
HAVE_GETHOSTBYADDR_R_5
HAVE_GETHOSTBYADDR_R_7
HAVE_GETHOSTBYADDR_R_8
HAVE_GETHOSTBYNAME_R_3
HAVE_GETHOSTBYNAME_R_5
HAVE_GETHOSTBYNAME_R_6
HAVE_INET_NTOA_R_DECL_REENTRANT)
if(NOT ${CURL_TEST})
if(${CURL_TEST}_REENTRANT)
set(NEED_REENTRANT 1)
endif(${CURL_TEST}_REENTRANT)
endif(NOT ${CURL_TEST})
endforeach(CURL_TEST)
if(NEED_REENTRANT)
foreach(CURL_TEST
HAVE_GETHOSTBYADDR_R_5
HAVE_GETHOSTBYADDR_R_7
HAVE_GETHOSTBYADDR_R_8
HAVE_GETHOSTBYNAME_R_3
HAVE_GETHOSTBYNAME_R_5
HAVE_GETHOSTBYNAME_R_6)
set(${CURL_TEST} 0)
if(${CURL_TEST}_REENTRANT)
set(${CURL_TEST} 1)
endif(${CURL_TEST}_REENTRANT)
endforeach(CURL_TEST)
endif(NEED_REENTRANT)
if(HAVE_INET_NTOA_R_DECL_REENTRANT)
set(HAVE_INET_NTOA_R_DECL 1)
set(NEED_REENTRANT 1)
endif(HAVE_INET_NTOA_R_DECL_REENTRANT)
# Some other minor tests
if(NOT HAVE_IN_ADDR_T)
set(in_addr_t "unsigned long")
endif(NOT HAVE_IN_ADDR_T)
# Fix libz / zlib.h
if(NOT CURL_SPECIAL_LIBZ)
if(NOT HAVE_LIBZ)
set(HAVE_ZLIB_H 0)
endif(NOT HAVE_LIBZ)
if(NOT HAVE_ZLIB_H)
set(HAVE_LIBZ 0)
endif(NOT HAVE_ZLIB_H)
endif(NOT CURL_SPECIAL_LIBZ)
if(_FILE_OFFSET_BITS)
set(_FILE_OFFSET_BITS 64)
endif(_FILE_OFFSET_BITS)
set(CMAKE_REQUIRED_FLAGS "-D_FILE_OFFSET_BITS=64")
set(CMAKE_EXTRA_INCLUDE_FILES "${CMAKE_CURRENT_SOURCE_DIR}/curl/curl.h")
check_type_size("curl_off_t" SIZEOF_CURL_OFF_T)
set(CMAKE_EXTRA_INCLUDE_FILES)
set(CMAKE_REQUIRED_FLAGS)
# Check for nonblocking
set(HAVE_DISABLED_NONBLOCKING 1)
if(HAVE_FIONBIO OR
HAVE_IOCTLSOCKET OR
HAVE_IOCTLSOCKET_CASE OR
HAVE_O_NONBLOCK)
set(HAVE_DISABLED_NONBLOCKING)
endif(HAVE_FIONBIO OR
HAVE_IOCTLSOCKET OR
HAVE_IOCTLSOCKET_CASE OR
HAVE_O_NONBLOCK)
if(RETSIGTYPE_TEST)
set(RETSIGTYPE void)
else(RETSIGTYPE_TEST)
set(RETSIGTYPE int)
endif(RETSIGTYPE_TEST)
if(CMAKE_COMPILER_IS_GNUCC AND APPLE)
include(CheckCCompilerFlag)
check_c_compiler_flag(-Wno-long-double HAVE_C_FLAG_Wno_long_double)
if(HAVE_C_FLAG_Wno_long_double)
# The Mac version of GCC warns about use of long double. Disable it.
get_source_file_property(MPRINTF_COMPILE_FLAGS mprintf.c COMPILE_FLAGS)
if(MPRINTF_COMPILE_FLAGS)
set(MPRINTF_COMPILE_FLAGS "${MPRINTF_COMPILE_FLAGS} -Wno-long-double")
else(MPRINTF_COMPILE_FLAGS)
set(MPRINTF_COMPILE_FLAGS "-Wno-long-double")
endif(MPRINTF_COMPILE_FLAGS)
set_source_files_properties(mprintf.c PROPERTIES
COMPILE_FLAGS ${MPRINTF_COMPILE_FLAGS})
endif(HAVE_C_FLAG_Wno_long_double)
endif(CMAKE_COMPILER_IS_GNUCC AND APPLE)
if(HAVE_SOCKLEN_T)
set(CURL_TYPEOF_CURL_SOCKLEN_T "socklen_t")
if(WIN32)
set(CMAKE_EXTRA_INCLUDE_FILES "winsock2.h;ws2tcpip.h")
elseif(HAVE_SYS_SOCKET_H)
set(CMAKE_EXTRA_INCLUDE_FILES "sys/socket.h")
endif()
check_type_size("socklen_t" CURL_SIZEOF_CURL_SOCKLEN_T)
set(CMAKE_EXTRA_INCLUDE_FILES)
if(NOT HAVE_CURL_SIZEOF_CURL_SOCKLEN_T)
message(FATAL_ERROR
"Check for sizeof socklen_t failed, see CMakeFiles/CMakerror.log")
endif()
else()
set(CURL_TYPEOF_CURL_SOCKLEN_T int)
set(CURL_SIZEOF_CURL_SOCKLEN_T ${SIZEOF_INT})
endif()
include(CMake/OtherTests.cmake)
add_definitions(-DHAVE_CONFIG_H)
# For Windows, do not allow the compiler to use its default target (Vista).
if(WIN32)
add_definitions(-D_WIN32_WINNT=0x0501)
endif(WIN32)
if(MSVC)
add_definitions(-D_CRT_SECURE_NO_DEPRECATE -D_CRT_NONSTDC_NO_DEPRECATE)
endif(MSVC)
# Sets up the dependencies (zlib, OpenSSL, etc.) of a cURL subproject according to options.
# TODO: This is far from complete!
function(SETUP_CURL_DEPENDENCIES TARGET_NAME)
if(CURL_ZLIB AND ZLIB_FOUND)
include_directories(${ZLIB_INCLUDE_DIR})
#ADD_DEFINITIONS( -DHAVE_ZLIB_H -DHAVE_ZLIB -DHAVE_LIBZ )
endif()
if(CMAKE_USE_OPENSSL AND OPENSSL_FOUND)
include_directories(${OPENSSL_INCLUDE_DIR})
endif()
if(CMAKE_USE_OPENSSL AND CURL_CONFIG_HAS_BEEN_RUN_BEFORE)
#ADD_DEFINITIONS( -DUSE_SSLEAY )
endif()
target_link_libraries(${TARGET_NAME} ${CURL_LIBS})
endfunction()
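# A minimal usage sketch (the target and source-list names below are assumptions
# for illustration, not taken from this project): a subproject that links against
# the bundled curl would call the helper right after declaring its target, e.g.
#
#   add_library(curl_sample ${CURL_SAMPLE_SOURCES})
#   setup_curl_dependencies(curl_sample)
#
# which adds the zlib/OpenSSL include directories (when those options are enabled)
# and links the libraries collected in CURL_LIBS.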
# Ugly (but functional) way to include "Makefile.inc" by transforming it (i.e. regenerating it).
function(TRANSFORM_MAKEFILE_INC INPUT_FILE OUTPUT_FILE)
file(READ ${INPUT_FILE} MAKEFILE_INC_TEXT)
string(REPLACE "$(top_srcdir)" "\${CURL_SOURCE_DIR}" MAKEFILE_INC_TEXT ${MAKEFILE_INC_TEXT})
string(REPLACE "$(top_builddir)" "\${CURL_BINARY_DIR}" MAKEFILE_INC_TEXT ${MAKEFILE_INC_TEXT})
string(REGEX REPLACE "\\\\\n" "§!§" MAKEFILE_INC_TEXT ${MAKEFILE_INC_TEXT})
string(REGEX REPLACE "([a-zA-Z_][a-zA-Z0-9_]*)[\t ]*=[\t ]*([^\n]*)" "SET(\\1 \\2)" MAKEFILE_INC_TEXT ${MAKEFILE_INC_TEXT})
string(REPLACE "§!§" "\n" MAKEFILE_INC_TEXT ${MAKEFILE_INC_TEXT})
string(REGEX REPLACE "\\$\\(([a-zA-Z_][a-zA-Z0-9_]*)\\)" "\${\\1}" MAKEFILE_INC_TEXT ${MAKEFILE_INC_TEXT}) # Replace $() with ${}
string(REGEX REPLACE "@([a-zA-Z_][a-zA-Z0-9_]*)@" "\${\\1}" MAKEFILE_INC_TEXT ${MAKEFILE_INC_TEXT}) # Replace @@ with ${}, even if that may not be read by CMake scripts.
file(WRITE ${OUTPUT_FILE} ${MAKEFILE_INC_TEXT})
endfunction()
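# As a rough sketch of what the function above produces (the variable name and
# file list here are illustrative assumptions, not the real Makefile.inc): an
# automake-style line such as
#
#   CSOURCES = url.c easy.c hostip.c
#
# is rewritten into a CMake assignment
#
#   SET(CSOURCES url.c easy.c hostip.c)
#
# with $(top_srcdir)/$(top_builddir) mapped to ${CURL_SOURCE_DIR}/${CURL_BINARY_DIR},
# any other $(VAR) or @VAR@ turned into ${VAR}, and backslash-continued lines kept
# together inside a single SET() call, so the generated file can simply be pulled
# in with include().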
add_subdirectory(lib)
if(BUILD_CURL_EXE)
add_subdirectory(src)
endif()
if(BUILD_CURL_TESTS)
add_subdirectory(tests)
endif()
# This needs to be run very last so other parts of the scripts can take advantage of this.
if(NOT CURL_CONFIG_HAS_BEEN_RUN_BEFORE)
set(CURL_CONFIG_HAS_BEEN_RUN_BEFORE 1 CACHE INTERNAL "Flag to track whether this is the first time running CMake or if CMake has been configured before")
endif()
# Installation.
# First, install generated curlbuild.h
install(FILES "${CMAKE_CURRENT_BINARY_DIR}/include/curl/curlbuild.h"
DESTINATION include/curl )
# Next, install other headers excluding curlbuild.h
install(DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/include/curl"
DESTINATION include
FILES_MATCHING PATTERN "*.h"
PATTERN "curlbuild.h" EXCLUDE)
# Workaround for MSVS10 to avoid the Dialog Hell
# FIXME: This could be removed with future version of CMake.
if(MSVC_VERSION EQUAL 1600)
set(CURL_SLN_FILENAME "${CMAKE_CURRENT_BINARY_DIR}/CURL.sln")
if(EXISTS "${CURL_SLN_FILENAME}")
file(APPEND "${CURL_SLN_FILENAME}" "\n# This should be regenerated!\n")
endif()
endif()

View File

@@ -1,6 +1,6 @@
COPYRIGHT AND PERMISSION NOTICE
Copyright (c) 1996 - 2013, Daniel Stenberg, <daniel@haxx.se>.
Copyright (c) 1996 - 2008, Daniel Stenberg, <daniel@haxx.se>.
All rights reserved.

File diff suppressed because it is too large

View File

@@ -1,132 +0,0 @@
#!/bin/bash
# This script performs all of the steps needed to build a
# universal binary libcurl.framework for Mac OS X 10.4 or greater.
#
# Hendrik Visage:
# Generalizations added since Snow Leopard (10.6) does not include
# the 10.4u SDK.
#
# Also note:
# 10.5 is the *ONLY* SDK that supports PPC64 :( -- 10.6 does not have ppc64 support
# If you need PPC64 support then change the setting below to 1
PPC64_NEEDED=0
# The default is to build for the platform you are on; if you want
# compatibility with older versions then change USE_OLD to 1 :)
USE_OLD=0
VERSION=`/usr/bin/sed -ne 's/^#define LIBCURL_VERSION "\(.*\)"/\1/p' include/curl/curlver.h`
FRAMEWORK_VERSION=Versions/Release-$VERSION
# I also wanted to be able to "copy over" the system framework, which is why I
# added the version to the path, e.g. Versions/Release-7.20.1.
# Now a simple rsync -vaP libcurl.framework /Library/Frameworks will install it
# and set up the right paths to this version, leaving the system version
# "intact", so you can "fix" it later with the links to Versions/A/...
OLD_SDK=`ls /Developer/SDKs|head -1`
NEW_SDK=`ls -r /Developer/SDKs|head -1`
if test "0"$USE_OLD -gt 0
then
SDK32=$OLD_SDK
else
SDK32=$NEW_SDK
fi
MACVER=`echo $SDK32|sed -e s/[a-zA-Z]//g -e s/.\$//`
SDK32_DIR='/Developer/SDKs/'$SDK32
MINVER32='-mmacosx-version-min='$MACVER
ARCHES32='-arch i386 -arch ppc'
if test $PPC64_NEEDED -gt 0
then
SDK64=10.5
ARCHES64='-arch x86_64 -arch ppc64'
SDK64=`ls /Developer/SDKs|grep 10.5|head -1`
else
ARCHES64='-arch x86_64'
#We "know" that 10.4 and earlier do not support 64bit
OLD_SDK64=`ls /Developer/SDKs|egrep -v "10.[0-4]"|head -1`
NEW_SDK64=`ls -r /Developer/SDKs|egrep -v "10.[0-4]"|head -1`
if test $USE_OLD -gt 0
then
SDK64=$OLD_SDK64
else
SDK64=$NEW_SDK64
fi
fi
SDK64_DIR='/Developer/SDKs/'$SDK64
MACVER64=`echo $SDK64|sed -e s/[a-zA-Z]//g -e s/.\$//`
MINVER64='-mmacosx-version-min='$MACVER64
if test ! -z $SDK32; then
echo "----Configuring libcurl for 32 bit universal framework..."
make clean
./configure --disable-dependency-tracking --disable-static --with-gssapi \
CFLAGS="-Os -isysroot $SDK32_DIR $ARCHES32 $MINVER32" \
LDFLAGS="-Wl,-syslibroot,$SDK32_DIR $ARCHES32 $MINVER32 -Wl,-headerpad_max_install_names" \
CC=$CC
echo "----Building 32 bit libcurl..."
make
echo "----Creating 32 bit framework..."
rm -r libcurl.framework
mkdir -p libcurl.framework/${FRAMEWORK_VERSION}/Resources
cp lib/.libs/libcurl.dylib libcurl.framework/${FRAMEWORK_VERSION}/libcurl
install_name_tool -id @executable_path/../Frameworks/libcurl.framework/${FRAMEWORK_VERSION}/libcurl libcurl.framework/${FRAMEWORK_VERSION}/libcurl
/usr/bin/sed -e "s/7\.12\.3/$VERSION/" lib/libcurl.plist >libcurl.framework/${FRAMEWORK_VERSION}/Resources/Info.plist
mkdir -p libcurl.framework/${FRAMEWORK_VERSION}/Headers/curl
cp include/curl/*.h libcurl.framework/${FRAMEWORK_VERSION}/Headers/curl
pushd libcurl.framework
ln -fs ${FRAMEWORK_VERSION}/libcurl libcurl
ln -fs ${FRAMEWORK_VERSION}/Resources Resources
ln -fs ${FRAMEWORK_VERSION}/Headers Headers
cd Versions
ln -fs ${FRAMEWORK_VERSION} Current
echo Testing for SDK64
if test -d $SDK64_DIR; then
echo entering...
popd
make clean
echo "----Configuring libcurl for 64 bit universal framework..."
./configure --disable-dependency-tracking --disable-static --with-gssapi \
CFLAGS="-Os -isysroot $SDK64_DIR $ARCHES64 $MINVER64" \
LDFLAGS="-Wl,-syslibroot,$SDK64_DIR $ARCHES64 $MINVER64 -Wl,-headerpad_max_install_names" \
CC=$CC
echo "----Building 64 bit libcurl..."
make
echo "----Appending 64 bit framework to 32 bit framework..."
cp lib/.libs/libcurl.dylib libcurl.framework/${FRAMEWORK_VERSION}/libcurl64
install_name_tool -id @executable_path/../Frameworks/libcurl.framework/${FRAMEWORK_VERSION}/libcurl libcurl.framework/${FRAMEWORK_VERSION}/libcurl64
cp libcurl.framework/${FRAMEWORK_VERSION}/libcurl libcurl.framework/${FRAMEWORK_VERSION}/libcurl32
pwd
lipo libcurl.framework/${FRAMEWORK_VERSION}/libcurl32 libcurl.framework/${FRAMEWORK_VERSION}/libcurl64 -create -output libcurl.framework/${FRAMEWORK_VERSION}/libcurl
rm libcurl.framework/${FRAMEWORK_VERSION}/libcurl32 libcurl.framework/${FRAMEWORK_VERSION}/libcurl64
cp libcurl.framework/${FRAMEWORK_VERSION}/Headers/curl/curlbuild.h libcurl.framework/${FRAMEWORK_VERSION}/Headers/curl/curlbuild32.h
cp include/curl/curlbuild.h libcurl.framework/${FRAMEWORK_VERSION}/Headers/curl/curlbuild64.h
cat >libcurl.framework/${FRAMEWORK_VERSION}/Headers/curl/curlbuild.h <<EOF
#ifdef __LP64__
#include "curl/curlbuild64.h"
#else
#include "curl/curlbuild32.h"
#endif
EOF
fi
pwd
lipo -info libcurl.framework/${FRAMEWORK_VERSION}/libcurl
echo "libcurl.framework is built and can now be included in other projects."
echo "Copy libcurl.framework to your bundle's Contents/Frameworks folder, ~/Library/Frameworks or /Library/Frameworks."
else
echo "Building libcurl.framework requires Mac OS X 10.4 or later with the MacOSX10.4/5/6 SDK installed."
fi

View File

@@ -5,7 +5,7 @@
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) 1998 - 2010, Daniel Stenberg, <daniel@haxx.se>, et al.
# Copyright (C) 1998 - 2008, Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
@@ -18,6 +18,7 @@
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
# $Id: Makefile.dist,v 1.46 2008-08-28 20:08:37 bagder Exp $
###########################################################################
VC=vc6
@@ -32,68 +33,71 @@ ssl:
borland:
cd lib
$(MAKE) -f Makefile.b32
make -f Makefile.b32
cd ..\src
$(MAKE) -f Makefile.b32
make -f Makefile.b32
borland-ssl:
cd lib
$(MAKE) -f Makefile.b32 WITH_SSL=1
make -f Makefile.b32 WITH_SSL=1
cd ..\src
$(MAKE) -f Makefile.b32 WITH_SSL=1
make -f Makefile.b32 WITH_SSL=1
borland-ssl-zlib:
cd lib
$(MAKE) -f Makefile.b32 WITH_SSL=1 WITH_ZLIB=1
make -f Makefile.b32 WITH_SSL=1 WITH_ZLIB=1
cd ..\src
$(MAKE) -f Makefile.b32 WITH_SSL=1 WITH_ZLIB=1
make -f Makefile.b32 WITH_SSL=1 WITH_ZLIB=1
borland-clean:
cd lib
$(MAKE) -f Makefile.b32 clean
make -f Makefile.b32 clean
cd ..\src
$(MAKE) -f Makefile.b32 clean
make -f Makefile.b32 clean
watcom: .SYMBOLIC
cd lib && $(MAKE) -u -f Makefile.Watcom
cd src && $(MAKE) -u -f Makefile.Watcom
watcom:
cd lib
wmake -f Makefile.Watcom
cd ..\src
wmake -f Makefile.Watcom
watcom-clean: .SYMBOLIC
cd lib && $(MAKE) -u -f Makefile.Watcom clean
cd src && $(MAKE) -u -f Makefile.Watcom clean
watcom-vclean: .SYMBOLIC
cd lib && $(MAKE) -u -f Makefile.Watcom vclean
cd src && $(MAKE) -u -f Makefile.Watcom vclean
watcom-clean:
cd lib
wmake -f Makefile.Watcom clean
cd ..\src
wmake -f Makefile.Watcom clean
mingw32:
$(MAKE) -C lib -f Makefile.m32
$(MAKE) -C src -f Makefile.m32
mingw32-zlib:
$(MAKE) -C lib -f Makefile.m32 ZLIB=1
$(MAKE) -C src -f Makefile.m32 ZLIB=1
mingw32-ssl-zlib:
$(MAKE) -C lib -f Makefile.m32 SSL=1 ZLIB=1
$(MAKE) -C src -f Makefile.m32 SSL=1 ZLIB=1
mingw32-ssh2-ssl-zlib:
$(MAKE) -C lib -f Makefile.m32 SSH2=1 SSL=1 ZLIB=1
$(MAKE) -C src -f Makefile.m32 SSH2=1 SSL=1 ZLIB=1
mingw32-ssh2-ssl-sspi-zlib:
$(MAKE) -C lib -f Makefile.m32 SSH2=1 SSL=1 SSPI=1 ZLIB=1
$(MAKE) -C src -f Makefile.m32 SSH2=1 SSL=1 SSPI=1 ZLIB=1
mingw32-clean:
$(MAKE) -C lib -f Makefile.m32 clean
$(MAKE) -C src -f Makefile.m32 clean
$(MAKE) -C docs/examples -f Makefile.m32 clean
mingw32-vclean mingw32-distclean:
$(MAKE) -C lib -f Makefile.m32 vclean
$(MAKE) -C src -f Makefile.m32 vclean
$(MAKE) -C docs/examples -f Makefile.m32 vclean
mingw32-examples%:
$(MAKE) -C docs/examples -f Makefile.m32 CFG=$@
mingw32%:
$(MAKE) -C lib -f Makefile.m32 CFG=$@
$(MAKE) -C src -f Makefile.m32 CFG=$@
vc-clean: $(VC)
vc-clean:
cd lib
nmake -f Makefile.$(VC) clean
cd ..\src
nmake -f Makefile.$(VC) clean
vc-all: $(VC)
vc-all:
cd lib
nmake -f Makefile.$(VC) cfg=release
nmake -f Makefile.$(VC) cfg=release-ssl
@@ -118,85 +122,79 @@ vc-all: $(VC)
nmake -f Makefile.$(VC) cfg=debug-dll-zlib-dll
nmake -f Makefile.$(VC) cfg=debug-dll-ssl-dll-zlib-dll
vc: $(VC)
vc:
cd lib
nmake /f Makefile.$(VC) cfg=release
cd ..\src
nmake /f Makefile.$(VC)
vc-x64: $(VC)
vc-x64:
cd lib
nmake /f Makefile.$(VC) MACHINE=x64 cfg=release
MACHINE=x64 nmake /f Makefile.$(VC) cfg=release
cd ..\src
nmake /f Makefile.$(VC) MACHINE=x64 cfg=release
MACHINE=x64 nmake /f Makefile.$(VC)
vc-zlib: $(VC)
vc-zlib:
cd lib
nmake /f Makefile.$(VC) cfg=release-zlib
cd ..\src
nmake /f Makefile.$(VC) cfg=release-zlib
vc-ssl: $(VC)
vc-ssl:
cd lib
nmake /f Makefile.$(VC) cfg=release-ssl
cd ..\src
nmake /f Makefile.$(VC) cfg=release-ssl
vc-ssl-zlib: $(VC)
vc-ssl-zlib:
cd lib
nmake /f Makefile.$(VC) cfg=release-ssl-zlib
cd ..\src
nmake /f Makefile.$(VC) cfg=release-ssl-zlib
vc-x64-ssl-zlib: $(VC)
cd lib
nmake /f Makefile.$(VC) MACHINE=x64 cfg=release-ssl-zlib
cd ..\src
nmake /f Makefile.$(VC) MACHINE=x64 cfg=release-ssl-zlib
vc-ssl-dll: $(VC)
vc-ssl-dll:
cd lib
nmake /f Makefile.$(VC) cfg=release-ssl-dll
cd ..\src
nmake /f Makefile.$(VC) cfg=release-ssl-dll
vc-dll-ssl-dll: $(VC)
vc-dll-ssl-dll:
cd lib
nmake /f Makefile.$(VC) cfg=release-dll-ssl-dll
cd ..\src
nmake /f Makefile.$(VC) cfg=release-dll-ssl-dll
vc-dll: $(VC)
vc-dll:
cd lib
nmake /f Makefile.$(VC) cfg=release-dll
cd ..\src
nmake /f Makefile.$(VC) cfg=release-dll
vc-dll-zlib-dll: $(VC)
vc-dll-zlib-dll:
cd lib
nmake /f Makefile.$(VC) cfg=release-dll-zlib-dll
cd ..\src
nmake /f Makefile.$(VC) cfg=release-dll-zlib-dll
vc-dll-ssl-dll-zlib-dll: $(VC)
vc-dll-ssl-dll-zlib-dll:
cd lib
nmake /f Makefile.$(VC) cfg=release-dll-ssl-dll-zlib-dll
cd ..\src
nmake /f Makefile.$(VC) cfg=release-dll-ssl-dll-zlib-dll
vc-ssl-dll-zlib-dll: $(VC)
vc-ssl-dll-zlib-dll:
cd lib
nmake /f Makefile.$(VC) cfg=release-ssl-dll-zlib-dll
cd ..\src
nmake /f Makefile.$(VC) cfg=release-ssl-dll-zlib-dll
vc-zlib-dll: $(VC)
vc-zlib-dll:
cd lib
nmake /f Makefile.$(VC) cfg=release-zlib-dll
cd ..\src
nmake /f Makefile.$(VC) cfg=release-zlib-dll
vc-sspi: $(VC)
vc-sspi:
cd lib
nmake /f Makefile.$(VC) cfg=release WINDOWS_SSPI=1
cd ..\src
@@ -222,27 +220,34 @@ netware:
$(MAKE) -C lib -f Makefile.netware
$(MAKE) -C src -f Makefile.netware
netware-ares:
$(MAKE) -C lib -f Makefile.netware WITH_ARES=1
$(MAKE) -C src -f Makefile.netware WITH_ARES=1
netware-ssl:
$(MAKE) -C lib -f Makefile.netware WITH_SSL=1
$(MAKE) -C src -f Makefile.netware WITH_SSL=1
netware-ssl-zlib:
$(MAKE) -C lib -f Makefile.netware WITH_SSL=1 WITH_ZLIB=1
$(MAKE) -C src -f Makefile.netware WITH_SSL=1 WITH_ZLIB=1
netware-ssh2-ssl-zlib:
$(MAKE) -C lib -f Makefile.netware WITH_SSH2=1 WITH_SSL=1 WITH_ZLIB=1
$(MAKE) -C src -f Makefile.netware WITH_SSH2=1 WITH_SSL=1 WITH_ZLIB=1
netware-zlib:
$(MAKE) -C lib -f Makefile.netware WITH_ZLIB=1
$(MAKE) -C src -f Makefile.netware WITH_ZLIB=1
netware-clean:
$(MAKE) -C lib -f Makefile.netware clean
$(MAKE) -C src -f Makefile.netware clean
$(MAKE) -C docs/examples -f Makefile.netware clean
netware-vclean netware-distclean:
$(MAKE) -C lib -f Makefile.netware vclean
$(MAKE) -C src -f Makefile.netware vclean
$(MAKE) -C docs/examples -f Makefile.netware vclean
netware-install:
$(MAKE) -C lib -f Makefile.netware install
$(MAKE) -C src -f Makefile.netware install
netware-examples-%:
$(MAKE) -C docs/examples -f Makefile.netware CFG=$@
netware-%:
$(MAKE) -C lib -f Makefile.netware CFG=$@
$(MAKE) -C src -f Makefile.netware CFG=$@
unix: all
unix-ssl: ssl
@@ -251,40 +256,10 @@ linux: all
linux-ssl: ssl
# We don't need to do anything for vc6.
vc6:
vc8: lib/Makefile.vc8 src/Makefile.vc8
lib/Makefile.vc8: lib/Makefile.vc6
@echo "generate $@"
@sed -e "s#/GX /DWIN32 /YX#/EHsc /DWIN32#" -e "s#/GZ#/RTC1#" -e "s/ws2_32.lib/ws2_32.lib bufferoverflowu.lib/g" -e "s/VC6/VC8/g" lib/Makefile.vc6 > lib/Makefile.vc8
src/Makefile.vc8: src/Makefile.vc6
@echo "generate $@"
@sed -e "s#/GX /DWIN32 /YX#/EHsc /DWIN32#" -e "s#/GZ#/RTC1#" -e "s/ws2_32.lib/ws2_32.lib bufferoverflowu.lib/g" -e "s/VC6/VC8/g" src/Makefile.vc6 > src/Makefile.vc8
# VC9 makefiles are for use with VS2008
vc9: lib/Makefile.vc9 src/Makefile.vc9
lib/Makefile.vc9: lib/Makefile.vc6
@echo "generate $@"
@sed -e "s#/GX /DWIN32 /YX#/EHsc /DWIN32#" -e "s#/GZ#/RTC1#" -e "s/ws2_32.lib/ws2_32.lib/g" -e "s/vc6/vc9/g" -e "s/VC6/VC9/g" lib/Makefile.vc6 > lib/Makefile.vc9
src/Makefile.vc9: src/Makefile.vc6
@echo "generate $@"
@sed -e "s#/GX /DWIN32 /YX#/EHsc /DWIN32#" -e "s#/GZ#/RTC1#" -e "s/ws2_32.lib/ws2_32.lib/g" -e "s/vc6/vc9/g" -e "s/VC6/VC9/g" src/Makefile.vc6 > src/Makefile.vc9
# VC10 makefiles are for use with VS2010
vc10: lib/Makefile.vc10 src/Makefile.vc10
lib/Makefile.vc10: lib/Makefile.vc6
@echo "generate $@"
@sed -e "s#/GX /DWIN32 /YX#/EHsc /DWIN32#" -e "s#/GZ#/RTC1#" -e "s/ws2_32.lib/ws2_32.lib/g" -e "s/vc6/vc10/g" -e "s/VC6/VC10/g" lib/Makefile.vc6 > lib/Makefile.vc10
src/Makefile.vc10: src/Makefile.vc6
@echo "generate $@"
@sed -e "s#/GX /DWIN32 /YX#/EHsc /DWIN32#" -e "s#/GZ#/RTC1#" -e "s/ws2_32.lib/ws2_32.lib/g" -e "s/vc6/vc10/g" -e "s/VC6/VC10/g" src/Makefile.vc6 > src/Makefile.vc10
vc8:
@echo "generate VC8 makefiles"
@sed -e "s#/GX /DWIN32 /YX#/EHsc /DWIN32#" -e "s#/GZ#/RTC1#" -e "s/wsock32.lib/wsock32.lib bufferoverflowu.lib/g" -e "s/VC6/VC8/g" lib/Makefile.vc6 > lib/Makefile.vc8
@sed -e "s#/GX /DWIN32 /YX#/EHsc /DWIN32#" -e "s#/GZ#/RTC1#" -e "s/wsock32.lib/wsock32.lib bufferoverflowu.lib/g" -e "s/VC6/VC8/g" src/Makefile.vc6 > src/Makefile.vc8
ca-bundle: lib/mk-ca-bundle.pl
@echo "generate a fresh ca-bundle.crt"

View File

@@ -5,7 +5,7 @@
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) 1998 - 2012, Daniel Stenberg, <daniel@haxx.se>, et al.
# Copyright (C) 1998 - 2008, Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
@@ -18,30 +18,21 @@
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
# $Id: Makefile.am,v 1.71 2008-10-01 18:29:13 danf Exp $
###########################################################################
AUTOMAKE_OPTIONS = foreign
ACLOCAL_AMFLAGS = -I m4
CMAKE_DIST = CMakeLists.txt CMake/CMakeConfigurableFile.in \
CMake/CurlCheckCSourceCompiles.cmake CMake/CurlCheckCSourceRuns.cmake \
CMake/CurlTests.c CMake/FindOpenSSL.cmake CMake/FindZLIB.cmake \
CMake/OtherTests.cmake CMake/Platforms/WindowsCache.cmake \
CMake/Utilities.cmake include/curl/curlbuild.h.cmake
WINBUILD_DIST = winbuild/BUILD.WINDOWS.txt winbuild/gen_resp_file.bat \
winbuild/MakefileBuild.vc winbuild/Makefile.vc
EXTRA_DIST = CHANGES COPYING maketgz Makefile.dist curl-config.in \
curl-style.el sample.emacs RELEASE-NOTES buildconf \
libcurl.pc.in vc6curl.dsw MacOSX-Framework Android.mk $(CMAKE_DIST) \
Makefile.msvc.names $(WINBUILD_DIST) lib/libcurl.vers.in
EXTRA_DIST = CHANGES COPYING maketgz reconf Makefile.dist curl-config.in \
curl-style.el sample.emacs RELEASE-NOTES buildconf buildconf.bat \
libcurl.pc.in vc6curl.dsw
bin_SCRIPTS = curl-config
SUBDIRS = lib src include
DIST_SUBDIRS = $(SUBDIRS) tests packages docs
SUBDIRS = lib src
DIST_SUBDIRS = $(SUBDIRS) tests include packages docs
pkgconfigdir = $(libdir)/pkgconfig
pkgconfig_DATA = libcurl.pc
@@ -49,7 +40,7 @@ pkgconfig_DATA = libcurl.pc
dist-hook:
rm -rf $(top_builddir)/tests/log
find $(distdir) -name "*.dist" -exec rm {} \;
(distit=`find $(srcdir) -name "*.dist" | grep -v ./ares/`; \
(distit=`find $(srcdir) -name "*.dist"`; \
for file in $$distit; do \
strip=`echo $$file | sed -e s/^$(srcdir)// -e s/\.dist//`; \
cp $$file $(distdir)$$strip; \
@@ -81,21 +72,14 @@ test-full:
test-torture:
@(cd tests; $(MAKE) all torture-test)
test-am:
@(cd tests; $(MAKE) all am-test)
endif
examples:
@(cd docs/examples; $(MAKE) check)
# This is a hook to have 'make clean' also clean up the docs and the tests
# dir. The extra check for the Makefiles being present is necessary because
# 'make distcheck' will make clean first in these directories _before_ it runs
# this hook.
clean-local:
@(if test -f tests/Makefile; then cd tests; $(MAKE) clean; fi)
@(if test -f docs/Makefile; then cd docs; $(MAKE) clean; fi)
@(cd tests; $(MAKE) clean)
@(cd docs; $(MAKE) clean)
#
# Build source and binary rpms. For rpm-3.0 and above, the ~/.rpmmacros
@@ -162,7 +146,3 @@ ca-bundle: lib/mk-ca-bundle.pl
ca-firefox: lib/firefox-db2pem.sh
@echo "generate a fresh ca-bundle.crt"
./lib/firefox-db2pem.sh lib/ca-bundle.crt
checksrc:
cd lib && $(MAKE) checksrc
cd src && $(MAKE) checksrc

File diff suppressed because it is too large

View File

@@ -1,81 +0,0 @@
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) 1999 - 2010, Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://curl.haxx.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
#***************************************************************************
#
# This file is included from MSVC makefiles located in lib and src,
# providing libcurl common file names required by these makefiles.
#
# ------------------
# libcurl base name
# ------------------
!IF !DEFINED(LIB_NAME) || "$(LIB_NAME)" == ""
LIB_NAME = libcurl
!ENDIF
# -------------------------------------------------
# libcurl static and dynamic libraries common base
# file names for release and debug configurations
# -------------------------------------------------
!IF !DEFINED(LIB_NAME_STA_REL) || "$(LIB_NAME_STA_REL)" == ""
LIB_NAME_STA_REL = $(LIB_NAME)
!ENDIF
!IF !DEFINED(LIB_NAME_STA_DBG) || "$(LIB_NAME_STA_DBG)" == ""
LIB_NAME_STA_DBG = $(LIB_NAME_STA_REL)d
!ENDIF
!IF !DEFINED(LIB_NAME_DYN_REL) || "$(LIB_NAME_DYN_REL)" == ""
LIB_NAME_DYN_REL = $(LIB_NAME)
!ENDIF
!IF !DEFINED(LIB_NAME_DYN_DBG) || "$(LIB_NAME_DYN_DBG)" == ""
LIB_NAME_DYN_DBG = $(LIB_NAME_DYN_REL)d
!ENDIF
# --------------------------------------------
# Base names for libcurl DLL import libraries
# --------------------------------------------
!IF !DEFINED(LIB_NAME_IMP_REL) || "$(LIB_NAME_IMP_REL)" == ""
LIB_NAME_IMP_REL = $(LIB_NAME_DYN_REL)_imp
!ENDIF
!IF !DEFINED(LIB_NAME_IMP_DBG) || "$(LIB_NAME_IMP_DBG)" == ""
LIB_NAME_IMP_DBG = $(LIB_NAME_DYN_DBG)_imp
!ENDIF
# --------------------------------------
# File names with extension and no path
# --------------------------------------
LIBCURL_STA_LIB_REL = $(LIB_NAME_STA_REL).lib
LIBCURL_STA_LIB_DBG = $(LIB_NAME_STA_DBG).lib
LIBCURL_DYN_LIB_REL = $(LIB_NAME_DYN_REL).dll
LIBCURL_DYN_LIB_DBG = $(LIB_NAME_DYN_DBG).dll
LIBCURL_IMP_LIB_REL = $(LIB_NAME_IMP_REL).lib
LIBCURL_IMP_LIB_DBG = $(LIB_NAME_IMP_DBG).lib
LIBCURL_DYN_LIB_PDB = $(LIB_NAME_IMP_DBG).pdb
# End of Makefile.msvc.names

View File

@@ -34,11 +34,15 @@ WEB SITE
http://curl.haxx.se/
GIT
CVS
To download the very latest source off the GIT server do this:
To download the very latest source off the CVS server do this:
git clone git://github.com/bagder/curl.git
cvs -d :pserver:anonymous@cool.haxx.se:/cvsroot/curl login
(just press enter when asked for password)
cvs -d :pserver:anonymous@cool.haxx.se:/cvsroot/curl co curl
(you'll get a directory named curl created, filled with the source code)

View File

@@ -1,100 +1,35 @@
Curl and libcurl 7.29.0
Curl and libcurl 7.19.2
Public curl releases: 131
Command line options: 152
curl_easy_setopt() options: 199
Public curl releases: 108
Command line options: 128
curl_easy_setopt() options: 158
Public functions in libcurl: 58
Known libcurl bindings: 39
Contributors: 993
This release includes the following security fix:
o POP3/IMAP/SMTP SASL buffer overflow vulnerability [17]
Known libcurl bindings: 37
Contributors: 672
This release includes the following changes:
o test: offer "automake" output and check for perl better
o always-multi: always use non-blocking internals [1]
o imap: Added support for sasl digest-md5 authentication
o imap: Added support for sasl cram-md5 authentication
o imap: Added support for sasl ntlm authentication
o imap: Added support for sasl login authentication
o imap: Added support for sasl plain text authentication
o imap: Added support for login disabled server capability
o mk-ca-bundle: add -f, support passing to stdout and more [5]
o writeout: -w now supports remote_ip/port and local_ip/port
o
This release includes the following bugfixes:
o nss: prevent NSS from crashing on client auth hook failure
o darwinssl: Fixed inability to disable peer verification on Snow Leopard
and Lion
o curl_multi_remove_handle: fix memory leak triggered with CURLOPT_RESOLVE
o SCP: relative path didn't work as documented [7]
o setup_once.h: HP-UX <sys/socket.h> issue workaround
o configure: fix cross pkg-config detection
o runtests: Do not add undefined values to @INC
o build: fix compilation with CURL_DISABLE_CRYPTO_AUTH flag
o multi: fix re-sending request on early connection close
o HTTP: remove stray CRLF in chunk-encoded content-free request bodies
o build: fix AIX compilation and usage of events/revents
o VC Makefiles: add missing hostcheck
o nss: clear session cache if a client certificate from file is used
o nss: fix error messages for CURLE_SSL_{CACERT,CRL}_BADFILE
o fix HTTP CONNECT tunnel establishment upon delayed response [2]
o --libcurl: fix for non-zero default options
o FTP: reject illegal port numbers in EPSV 229 responses
o build: use per-target '_CPPFLAGS' for those currently using default
o configure: fix automake 1.13 compatibility [6]
o curl: ignore SIGPIPE [4]
o pop3: Added support for non-blocking SSL upgrade
o pop3: Fixed default authentication detection
o imap: Fixed usernames and passwords that contain escape characters
o packages/DOS/common.dj: remove COFF debug info generation [3]
o imap/pop3/smtp: Fixed failure detection during TLS upgrade [8]
o pop3: Fixed no known authentication mechanism when fallback is required [9]
o formadd: reject trying to read a directory where a file is expected [10]
o formpost: support quotes, commas and semicolon in file names [11]
o docs: update the comments about loading CA certs with NSS [12]
o docs: fix typos in man pages [13]
o darwinssl: Fix bug where packets were sometimes transmitted twice [14]
o winbuild: include version info for .dll .exe [15]
o schannel: Removed extended error connection setup flag [16]
o VMS: fix and generate the VMS build config
o build failure when using MSVC 6 makefile and on four more platforms
o crash when using --interface name on Linux systems with a TEQL device
o using the multi interface to download an HTTPS page with libcurl built
powered by OpenSSL could download "rubbish" instead of actual content
This release includes the following known bugs:
o see docs/KNOWN_BUGS (http://curl.haxx.se/docs/knownbugs.html)
Other curl-related news:
o
This release would not have looked like this without help, code, reports and
advice from friends like these:
Nick Zitzmann, Colin Watson, Fabian Keil, Kamil Dudka, Lijo Antony,
Linus Nielsen Feltzing, Marc Hoersken, Stanislav Ivochkin, Steve Holme,
Yang Tse, Balaji Parasuram, Dan Fandrich, Bob Relyea, Gisle Vanem,
Yves Arrouye, Kai Engert, Lluís Batlle i Rossell, Jiří Hruška,
John E. Malmberg, Tor Arntsen, Matt Arsenault, Sergei Nikulov,
Guenter Knauf, Craig Davison, Ulrich Doehner, Jiri Jaburek, Bruno de Carvalho,
Eldar Zaitov
John Wilkinson, Adam Sampson, Daniel Fandrich, Yang Tse, Rainer Canavan,
Michal Marek
Thanks! (and sorry if I forgot to mention someone)
References to bug reports and discussions on issues:
[1] = http://daniel.haxx.se/blog/2013/01/17/internally-were-all-multi-now/
[2] = http://curl.haxx.se/mail/lib-2013-01/0191.html
[3] = http://curl.haxx.se/mail/lib-2013-01/0130.html
[4] = http://curl.haxx.se/bug/view.cgi?id=1180
[5] = http://curl.haxx.se/mail/lib-2013-01/0045.html
[6] = http://curl.haxx.se/mail/lib-2012-12/0246.html
[7] = http://curl.haxx.se/bug/view.cgi?id=1173
[8] = http://curl.haxx.se/mail/lib-2013-01/0250.html
[9] = http://curl.haxx.se/mail/lib-2013-02/0004.html
[10] = http://curl.haxx.se/mail/archive-2013-01/0017.html
[11] = http://curl.haxx.se/bug/view.cgi?id=1171
[12] = https://bugzilla.redhat.com/696783
[13] = https://bugzilla.redhat.com/896544
[14] = http://curl.haxx.se/mail/lib-2013-01/0295.html
[15] = http://curl.haxx.se/bug/view.cgi?id=1186
[16] = http://curl.haxx.se/bug/view.cgi?id=1187
[17] = http://curl.haxx.se/docs/adv_20130206.html

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -6,7 +6,7 @@
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) 1998 - 2012, Daniel Stenberg, <daniel@haxx.se>, et al.
# Copyright (C) 1998 - 2008, Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
@@ -19,14 +19,12 @@
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
# $Id: buildconf,v 1.63 2008-08-14 23:55:24 yangtse Exp $
###########################################################################
#--------------------------------------------------------------------------
# die prints argument string to stdout and exits this shell script.
#
die(){
echo "buildconf: $@"
exit 1
echo "$@"
exit
}
#--------------------------------------------------------------------------
@@ -36,14 +34,6 @@ die(){
findtool(){
file="$1"
if { echo "$file" | grep "/" >/dev/null 2>&1; } then
# when file is given with a path check it first
if test -f "$file"; then
echo "$file"
return
fi
fi
old_IFS=$IFS; IFS=':'
for path in $PATH
do
@@ -80,19 +70,16 @@ removethis(){
# Ensure that buildconf runs from the subdirectory where configure.ac lives
#
if test ! -f configure.ac ||
test ! -f src/tool_main.c ||
test ! -f src/main.c ||
test ! -f lib/urldata.h ||
test ! -f include/curl/curl.h ||
test ! -f m4/curl-functions.m4; then
test ! -f include/curl/curl.h; then
echo "Can not run buildconf from outside of curl's source subdirectory!"
echo "Change to the subdirectory where buildconf is found, and try again."
exit 1
fi
#--------------------------------------------------------------------------
# autoconf 2.57 or newer. Unpatched version 2.67 does not generate a proper
# configure script. Unpatched version 2.68 is simply unusable; we should
# disallow its use.
# autoconf 2.57 or newer
#
need_autoconf="2.57"
ac_version=`${AUTOCONF:-autoconf} --version 2>/dev/null|head -n 1| sed -e 's/^[^0-9]*//' -e 's/[a-z]* *$//'`
@@ -101,7 +88,7 @@ if test -z "$ac_version"; then
echo " You need autoconf version $need_autoconf or newer installed."
exit 1
fi
old_IFS=$IFS; IFS='.'; set $ac_version; IFS=$old_IFS
IFS=.; set $ac_version; IFS=' '
if test "$1" = "2" -a "$2" -lt "57" || test "$1" -lt "2"; then
echo "buildconf: autoconf version $ac_version found."
echo " You need autoconf version $need_autoconf or newer installed."
@@ -111,15 +98,7 @@ if test "$1" = "2" -a "$2" -lt "57" || test "$1" -lt "2"; then
exit 1
fi
if test "$1" = "2" -a "$2" -eq "67"; then
echo "buildconf: autoconf version $ac_version (BAD)"
echo " Unpatched version generates broken configure script."
elif test "$1" = "2" -a "$2" -eq "68"; then
echo "buildconf: autoconf version $ac_version (BAD)"
echo " Unpatched version generates unusable configure script."
else
echo "buildconf: autoconf version $ac_version (ok)"
fi
echo "buildconf: autoconf version $ac_version (ok)"
am4te_version=`${AUTOM4TE:-autom4te} --version 2>/dev/null|head -n 1| sed -e 's/autom4te\(.*\)/\1/' -e 's/^[^0-9]*//' -e 's/[a-z]* *$//'`
if test -z "$am4te_version"; then
@@ -142,7 +121,7 @@ if test -z "$ah_version"; then
echo " You need autoheader version 2.50 or newer installed."
exit 1
fi
old_IFS=$IFS; IFS='.'; set $ah_version; IFS=$old_IFS
IFS=.; set $ah_version; IFS=' '
if test "$1" = "2" -a "$2" -lt "50" || test "$1" -lt "2"; then
echo "buildconf: autoheader version $ah_version found."
echo " You need autoheader version 2.50 or newer installed."
@@ -164,7 +143,7 @@ if test -z "$am_version"; then
echo " You need automake version $need_automake or newer installed."
exit 1
fi
old_IFS=$IFS; IFS='.'; set $am_version; IFS=$old_IFS
IFS=.; set $am_version; IFS=' '
if test "$1" = "1" -a "$2" -lt "7" || test "$1" -lt "1"; then
echo "buildconf: automake version $am_version found."
echo " You need automake version $need_automake or newer installed."
@@ -189,97 +168,83 @@ else
fi
#--------------------------------------------------------------------------
# GNU libtool preliminary check
# libtool check
#
want_lt_major=1
want_lt_minor=4
want_lt_patch=2
want_lt_version=1.4.2
# This approach of trying 'glibtool' first is intended for systems where
# GNU libtool is installed as 'glibtool' and 'libtool' is not GNU's.
LIBTOOL_WANTED_MAJOR=1
LIBTOOL_WANTED_MINOR=4
LIBTOOL_WANTED_PATCH=2
LIBTOOL_WANTED_VERSION=1.4.2
# This approach of trying 'glibtool' first is a work-around for some BSD
# systems that, I believe, provide the GNU libtool under the name glibtool,
# with 'libtool' being something completely different.
libtool=`findtool glibtool 2>/dev/null`
if test ! -x "$libtool"; then
libtool=`findtool ${LIBTOOL:-libtool}`
fi
if test -z "$libtool"; then
echo "buildconf: libtool not found."
echo " You need GNU libtool $want_lt_version or newer installed."
exit 1
fi
lt_pver=`$libtool --version 2>/dev/null|head -n 1`
lt_qver=`echo $lt_pver|sed -e "s/([^)]*)//g" -e "s/^[^0-9]*//g"`
lt_version=`echo $lt_qver|sed -e "s/[- ].*//" -e "s/\([a-z]*\)$//"`
if test -z "$lt_version"; then
echo "buildconf: libtool not found."
echo " You need GNU libtool $want_lt_version or newer installed."
exit 1
fi
old_IFS=$IFS; IFS='.'; set $lt_version; IFS=$old_IFS
lt_major=$1
lt_minor=$2
lt_patch=$3
if test -z "$lt_major"; then
lt_status="bad"
elif test "$lt_major" -gt "$want_lt_major"; then
lt_status="good"
elif test "$lt_major" -lt "$want_lt_major"; then
lt_status="bad"
elif test -z "$lt_minor"; then
lt_status="bad"
elif test "$lt_minor" -gt "$want_lt_minor"; then
lt_status="good"
elif test "$lt_minor" -lt "$want_lt_minor"; then
lt_status="bad"
elif test -z "$lt_patch"; then
lt_status="bad"
elif test "$lt_patch" -gt "$want_lt_patch"; then
lt_status="good"
elif test "$lt_patch" -lt "$want_lt_patch"; then
lt_status="bad"
if test -z "$LIBTOOLIZE"; then
# set the LIBTOOLIZE here so that glibtoolize is used if glibtool was found
# $libtool is already the full path
libtoolize="${libtool}ize"
else
lt_status="good"
libtoolize=`findtool $LIBTOOLIZE`
fi
if test "$lt_status" != "good"; then
echo "buildconf: libtool version $lt_version found."
echo " You need GNU libtool $want_lt_version or newer installed."
lt_pversion=`$libtool --version 2>/dev/null|head -n 2|sed -e 's/^[^0-9]*//g' -e 's/[- ].*//'`
if test -z "$lt_pversion"; then
echo "buildconf: libtool not found."
echo " You need libtool version $LIBTOOL_WANTED_VERSION or newer installed"
exit 1
fi
lt_version=`echo $lt_pversion|sed -e 's/\([a-z]*\)$//'`
IFS=.; set $lt_version; IFS=' '
lt_status="good"
major=$1
minor=$2
patch=$3
if test "$major" = "$LIBTOOL_WANTED_MAJOR"; then
if test "$minor" -lt "$LIBTOOL_WANTED_MINOR"; then
lt_status="bad"
elif test -n "$LIBTOOL_WANTED_PATCH"; then
if test "$minor" -gt "$LIBTOOL_WANTED_MINOR"; then
lt_status="good"
elif test -n "$patch"; then
if test "$patch" -lt "$LIBTOOL_WANTED_PATCH"; then
lt_status="bad"
fi
else
lt_status="bad"
fi
fi
fi
if test $lt_status != "good"; then
echo "buildconf: libtool version $lt_pversion found."
echo " You need libtool version $LIBTOOL_WANTED_VERSION or newer installed"
exit 1
fi
echo "buildconf: libtool version $lt_version (ok)"
#--------------------------------------------------------------------------
# GNU libtoolize check
#
if test -z "$LIBTOOLIZE"; then
# use (g)libtoolize from same location as (g)libtool
libtoolize="${libtool}ize"
if test -f "$libtoolize"; then
echo "buildconf: libtoolize found"
else
libtoolize=`findtool $LIBTOOLIZE`
fi
if test ! -f "$libtoolize"; then
echo "buildconf: libtoolize not found."
echo " You need GNU libtoolize $want_lt_version or newer installed."
echo "buildconf: libtoolize not found. Weird libtool installation!"
exit 1
fi
#--------------------------------------------------------------------------
# m4 check
#
m4=`(${M4:-m4} --version || ${M4:-gm4} --version) 2>/dev/null | head -n 1`;
m4=`${M4:-m4} --version 2>/dev/null|head -n 1`;
m4_version=`echo $m4 | sed -e 's/^.* \([0-9]\)/\1/' -e 's/[a-z]* *$//'`
if { echo $m4 | grep "GNU" >/dev/null 2>&1; } then
echo "buildconf: GNU m4 version $m4_version (ok)"
else
if test -z "$m4"; then
echo "buildconf: m4 version not recognized. You need a GNU m4 installed!"
else
echo "buildconf: m4 version $m4 found. You need a GNU m4 installed!"
fi
echo "buildconf: m4 version $m4 found. You need a GNU m4 installed!"
exit 1
fi
@@ -287,38 +252,23 @@ fi
# perl check
#
PERL=`findtool ${PERL:-perl}`
if test -z "$PERL"; then
echo "buildconf: perl not found"
exit 1
fi
#--------------------------------------------------------------------------
# Remove files generated on previous buildconf/configure run.
#
for fname in .deps \
.libs \
*.la \
*.lo \
*.a \
*.o \
Makefile \
Makefile.in \
aclocal.m4 \
aclocal.m4.bak \
ares_build.h \
ares_config.h \
ares_config.h.in \
autom4te.cache \
compile \
config.guess \
curl_config.h \
curl_config.h.in \
config.h \
config.h.in \
config.log \
config.lt \
config.status \
config.sub \
configure \
configurehelp.pm \
curl-config \
curlbuild.h \
depcomp \
@@ -326,12 +276,7 @@ for fname in .deps \
libcurl.pc \
libtool \
libtool.m4 \
libtool.m4.tmp \
ltmain.sh \
ltoptions.m4 \
ltsugar.m4 \
ltversion.m4 \
lt~obsolete.m4 \
stamp-h1 \
stamp-h2 \
stamp-h3 ; do
@@ -343,49 +288,27 @@ done
#
echo "buildconf: running libtoolize"
${libtoolize} --copy --automake --force || die "libtoolize command failed"
# When using libtool 1.5.X (X < 26) we copy libtool.m4 to our local m4
# subdirectory and this local copy is patched to fix some warnings that
# are triggered when running aclocal and using autoconf 2.62 or later.
if test "$lt_major" = "1" && test "$lt_minor" = "5"; then
if test -z "$lt_patch" || test "$lt_patch" -lt "26"; then
echo "buildconf: copying libtool.m4 to local m4 subdir"
ac_dir=`${ACLOCAL:-aclocal} --print-ac-dir`
if test -f $ac_dir/libtool.m4; then
cp -f $ac_dir/libtool.m4 m4/libtool.m4
else
echo "buildconf: $ac_dir/libtool.m4 not found"
fi
if test -f m4/libtool.m4; then
echo "buildconf: renaming some variables in local m4/libtool.m4"
$PERL -i.tmp -pe \
's/lt_prog_compiler_pic_works/lt_cv_prog_compiler_pic_works/g; \
s/lt_prog_compiler_static_works/lt_cv_prog_compiler_static_works/g;' \
m4/libtool.m4
rm -f m4/libtool.m4.tmp
fi
fi
fi
if test -f m4/libtool.m4; then
echo "buildconf: converting all mv to mv -f in local m4/libtool.m4"
$PERL -i.tmp -pe 's/\bmv +([^-\s])/mv -f $1/g' m4/libtool.m4
rm -f m4/libtool.m4.tmp
fi
$libtoolize --copy --automake --force || die "The libtoolize command failed"
echo "buildconf: running aclocal"
${ACLOCAL:-aclocal} -I m4 $ACLOCAL_FLAGS || die "aclocal command failed"
${ACLOCAL:-aclocal} -I m4 $ACLOCAL_FLAGS || die "The aclocal command line failed"
echo "buildconf: converting all mv to mv -f in local aclocal.m4"
$PERL -i.bak -pe 's/\bmv +([^-\s])/mv -f $1/g' aclocal.m4
if test -n "$PERL"; then
echo "buildconf: running aclocal hack to convert all mv to mv -f"
$PERL -i.bak -pe 's/\bmv +([^-\s])/mv -f $1/g' aclocal.m4
else
echo "buildconf: perl not found"
exit 1
fi
echo "buildconf: running autoheader"
${AUTOHEADER:-autoheader} || die "autoheader command failed"
${AUTOHEADER:-autoheader} || die "The autoheader command failed"
echo "buildconf: cp lib/config.h.in src/config.h.in"
cp lib/config.h.in src/config.h.in
echo "buildconf: running autoconf"
${AUTOCONF:-autoconf} || die "autoconf command failed"
${AUTOCONF:-autoconf} || die "The autoconf command failed"
if test -d ares; then
cd ares
@@ -395,67 +318,7 @@ if test -d ares; then
fi
echo "buildconf: running automake"
${AUTOMAKE:-automake} --add-missing --copy || die "automake command failed"
${AUTOMAKE:-automake} -a -c || die "The automake command failed"
#--------------------------------------------------------------------------
# GNU libtool complementary check
#
# Depending on the libtool and automake versions being used, config.guess
# might not be installed in the subdirectory until automake has finished.
# So we can not attempt to use it until this very last buildconf stage.
#
if test ! -f ./config.guess; then
echo "buildconf: config.guess not found"
else
buildhost=`./config.guess 2>/dev/null|head -n 1`
case $buildhost in
*-*-darwin*)
need_lt_major=1
need_lt_minor=5
need_lt_patch=26
need_lt_check="yes"
;;
*-*-hpux*)
need_lt_major=1
need_lt_minor=5
need_lt_patch=24
need_lt_check="yes"
;;
esac
if test ! -z "$need_lt_check"; then
if test -z "$lt_major"; then
lt_status="bad"
elif test "$lt_major" -gt "$need_lt_major"; then
lt_status="good"
elif test "$lt_major" -lt "$need_lt_major"; then
lt_status="bad"
elif test -z "$lt_minor"; then
lt_status="bad"
elif test "$lt_minor" -gt "$need_lt_minor"; then
lt_status="good"
elif test "$lt_minor" -lt "$need_lt_minor"; then
lt_status="bad"
elif test -z "$lt_patch"; then
lt_status="bad"
elif test "$lt_patch" -gt "$need_lt_patch"; then
lt_status="good"
elif test "$lt_patch" -lt "$need_lt_patch"; then
lt_status="bad"
else
lt_status="good"
fi
if test "$lt_status" != "good"; then
need_lt_version="$need_lt_major.$need_lt_minor.$need_lt_patch"
echo "buildconf: libtool version $lt_version found."
echo " $buildhost requires GNU libtool $need_lt_version or newer installed."
rm -f configure
exit 1
fi
fi
fi
#--------------------------------------------------------------------------
# Finished successfully.
#
echo "buildconf: OK"
exit 0

View File

@@ -0,0 +1,13 @@
@echo off
REM set up a CVS tree to build when there's no autotools
REM $Revision: 1.6 $
REM $Date: 2008-08-07 00:29:08 $
REM create hugehelp.c
copy src\hugehelp.c.cvs src\hugehelp.c
REM create Makefile
copy Makefile.dist Makefile
REM create curlbuild.h
copy include\curl\curlbuild.h.dist include\curl\curlbuild.h

View File

@@ -1,10 +1,9 @@
#! /bin/sh
# Wrapper for compilers which do not understand '-c -o'.
# Wrapper for compilers which do not understand `-c -o'.
scriptversion=2012-03-05.13; # UTC
scriptversion=2005-05-14.22
# Copyright (C) 1999, 2000, 2003, 2004, 2005, 2009, 2010, 2012 Free
# Software Foundation, Inc.
# Copyright (C) 1999, 2000, 2003, 2004, 2005 Free Software Foundation, Inc.
# Written by Tom Tromey <tromey@cygnus.com>.
#
# This program is free software; you can redistribute it and/or modify
@@ -18,7 +17,8 @@ scriptversion=2012-03-05.13; # UTC
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
@@ -29,219 +29,21 @@ scriptversion=2012-03-05.13; # UTC
# bugs to <bug-automake@gnu.org> or send patches to
# <automake-patches@gnu.org>.
nl='
'
# We need space, tab and new line, in precisely that order. Quoting is
# there to prevent tools from complaining about whitespace usage.
IFS=" "" $nl"
file_conv=
# func_file_conv build_file lazy
# Convert a $build file to $host form and store it in $file
# Currently only supports Windows hosts. If the determined conversion
# type is listed in (the comma separated) LAZY, no conversion will
# take place.
func_file_conv ()
{
file=$1
case $file in
/ | /[!/]*) # absolute file, and not a UNC file
if test -z "$file_conv"; then
# lazily determine how to convert abs files
case `uname -s` in
MINGW*)
file_conv=mingw
;;
CYGWIN*)
file_conv=cygwin
;;
*)
file_conv=wine
;;
esac
fi
case $file_conv/,$2, in
*,$file_conv,*)
;;
mingw/*)
file=`cmd //C echo "$file " | sed -e 's/"\(.*\) " *$/\1/'`
;;
cygwin/*)
file=`cygpath -m "$file" || echo "$file"`
;;
wine/*)
file=`winepath -w "$file" || echo "$file"`
;;
esac
;;
esac
}
# func_cl_dashL linkdir
# Make cl look for libraries in LINKDIR
func_cl_dashL ()
{
func_file_conv "$1"
if test -z "$lib_path"; then
lib_path=$file
else
lib_path="$lib_path;$file"
fi
linker_opts="$linker_opts -LIBPATH:$file"
}
# func_cl_dashl library
# Do a library search-path lookup for cl
func_cl_dashl ()
{
lib=$1
found=no
save_IFS=$IFS
IFS=';'
for dir in $lib_path $LIB
do
IFS=$save_IFS
if $shared && test -f "$dir/$lib.dll.lib"; then
found=yes
lib=$dir/$lib.dll.lib
break
fi
if test -f "$dir/$lib.lib"; then
found=yes
lib=$dir/$lib.lib
break
fi
done
IFS=$save_IFS
if test "$found" != yes; then
lib=$lib.lib
fi
}
# func_cl_wrapper cl arg...
# Adjust compile command to suit cl
func_cl_wrapper ()
{
# Assume a capable shell
lib_path=
shared=:
linker_opts=
for arg
do
if test -n "$eat"; then
eat=
else
case $1 in
-o)
# configure might choose to run compile as 'compile cc -o foo foo.c'.
eat=1
case $2 in
*.o | *.[oO][bB][jJ])
func_file_conv "$2"
set x "$@" -Fo"$file"
shift
;;
*)
func_file_conv "$2"
set x "$@" -Fe"$file"
shift
;;
esac
;;
-I)
eat=1
func_file_conv "$2" mingw
set x "$@" -I"$file"
shift
;;
-I*)
func_file_conv "${1#-I}" mingw
set x "$@" -I"$file"
shift
;;
-l)
eat=1
func_cl_dashl "$2"
set x "$@" "$lib"
shift
;;
-l*)
func_cl_dashl "${1#-l}"
set x "$@" "$lib"
shift
;;
-L)
eat=1
func_cl_dashL "$2"
;;
-L*)
func_cl_dashL "${1#-L}"
;;
-static)
shared=false
;;
-Wl,*)
arg=${1#-Wl,}
save_ifs="$IFS"; IFS=','
for flag in $arg; do
IFS="$save_ifs"
linker_opts="$linker_opts $flag"
done
IFS="$save_ifs"
;;
-Xlinker)
eat=1
linker_opts="$linker_opts $2"
;;
-*)
set x "$@" "$1"
shift
;;
*.cc | *.CC | *.cxx | *.CXX | *.[cC]++)
func_file_conv "$1"
set x "$@" -Tp"$file"
shift
;;
*.c | *.cpp | *.CPP | *.lib | *.LIB | *.Lib | *.OBJ | *.obj | *.[oO])
func_file_conv "$1" mingw
set x "$@" "$file"
shift
;;
*)
set x "$@" "$1"
shift
;;
esac
fi
shift
done
if test -n "$linker_opts"; then
linker_opts="-link$linker_opts"
fi
exec "$@" $linker_opts
exit 1
}
eat=
case $1 in
'')
echo "$0: No command. Try '$0 --help' for more information." 1>&2
echo "$0: No command. Try \`$0 --help' for more information." 1>&2
exit 1;
;;
-h | --h*)
cat <<\EOF
Usage: compile [--help] [--version] PROGRAM [ARGS]
Wrapper for compilers which do not understand '-c -o'.
Remove '-o dest.o' from ARGS, run PROGRAM with the remaining
Wrapper for compilers which do not understand `-c -o'.
Remove `-o dest.o' from ARGS, run PROGRAM with the remaining
arguments, and rename the output as expected.
If you are trying to build a whole package this is not the
right script to run: please start by reading the file 'INSTALL'.
right script to run: please start by reading the file `INSTALL'.
Report bugs to <bug-automake@gnu.org>.
EOF
@ -251,13 +53,11 @@ EOF
echo "compile $scriptversion"
exit $?
;;
cl | *[/\\]cl | cl.exe | *[/\\]cl.exe )
func_cl_wrapper "$@" # Doesn't return...
;;
esac
ofile=
cfile=
eat=
for arg
do
@ -266,8 +66,8 @@ do
else
case $1 in
-o)
# configure might choose to run compile as 'compile cc -o foo foo.c'.
# So we strip '-o arg' only if arg is an object.
# configure might choose to run compile as `compile cc -o foo foo.c'.
# So we strip `-o arg' only if arg is an object.
eat=1
case $2 in
*.o | *.obj)
@ -294,22 +94,22 @@ do
done
if test -z "$ofile" || test -z "$cfile"; then
# If no '-o' option was seen then we might have been invoked from a
# If no `-o' option was seen then we might have been invoked from a
# pattern rule where we don't need one. That is ok -- this is a
# normal compilation that the losing compiler can handle. If no
# '.c' file was seen then we are probably linking. That is also
# `.c' file was seen then we are probably linking. That is also
# ok.
exec "$@"
fi
# Name of file we expect compiler to create.
cofile=`echo "$cfile" | sed 's|^.*[\\/]||; s|^[a-zA-Z]:||; s/\.c$/.o/'`
cofile=`echo "$cfile" | sed -e 's|^.*/||' -e 's/\.c$/.o/'`
# Create the lock directory.
# Note: use '[/\\:.-]' here to ensure that we don't use the same name
# Note: use `[/.-]' here to ensure that we don't use the same name
# that we are using for the .o file. Also, base the name on the expected
# object file name, since that is what matters with a parallel build.
lockdir=`echo "$cofile" | sed -e 's|[/\\:.-]|_|g'`.d
lockdir=`echo "$cofile" | sed -e 's|[/.-]|_|g'`.d
while true; do
if mkdir "$lockdir" >/dev/null 2>&1; then
break
@ -324,9 +124,9 @@ trap "rmdir '$lockdir'; exit 1" 1 2 15
ret=$?
if test -f "$cofile"; then
test "$cofile" = "$ofile" || mv "$cofile" "$ofile"
mv "$cofile" "$ofile"
elif test -f "${cofile}bj"; then
test "${cofile}bj" = "$ofile" || mv "${cofile}bj" "$ofile"
mv "${cofile}bj" "$ofile"
fi
rmdir "$lockdir"
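# Worked example of the lock/rename dance above, assuming a hypothetical
# invocation `compile cc -c -o sub/foo.o sub/foo.c` with a compiler that
# ignores -o: the stripped compile writes foo.o in the current directory
# (cofile=foo.o), lockdir becomes foo_o.d, and the object is then moved to
# the requested sub/foo.o once the compiler returns.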
@ -338,6 +138,5 @@ exit $ret
# eval: (add-hook 'write-file-hooks 'time-stamp)
# time-stamp-start: "scriptversion="
# time-stamp-format: "%:y-%02m-%02d.%02H"
# time-stamp-time-zone: "UTC"
# time-stamp-end: "; # UTC"
# time-stamp-end: "$"
# End:

View File

@ -1,10 +1,10 @@
#! /bin/sh
# Attempt to guess a canonical system name.
# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012 Free Software Foundation, Inc.
# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
# Free Software Foundation, Inc.
timestamp='2012-02-10'
timestamp='2008-01-23'
# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
@ -17,7 +17,9 @@ timestamp='2012-02-10'
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
# 02110-1301, USA.
#
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
@ -25,16 +27,16 @@ timestamp='2012-02-10'
# the same distribution terms that you use for the rest of that program.
# Originally written by Per Bothner. Please send patches (context
# diff format) to <config-patches@gnu.org> and include a ChangeLog
# entry.
# Originally written by Per Bothner <per@bothner.com>.
# Please send patches to <config-patches@gnu.org>. Submit a context
# diff and a properly formatted ChangeLog entry.
#
# This script attempts to guess a canonical system name similar to
# config.sub. If it succeeds, it prints the system name on stdout, and
# exits with 0. Otherwise, it exits with 1.
#
# You can get the latest version of this script from:
# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD
# The plan is that this can be called by configure scripts if you
# don't specify an explicit build system type.
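# Typical use (output shown is only an example; the guess depends entirely
# on the machine running the script):
#   $ sh ./config.guess
#   x86_64-unknown-linux-gnu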
me=`echo "$0" | sed -e 's,.*/,,'`
@ -54,9 +56,8 @@ version="\
GNU config.guess ($timestamp)
Originally written by Per Bothner.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
Free Software Foundation, Inc.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@ -143,7 +144,7 @@ UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
*:NetBSD:*:*)
# NetBSD (nbsd) targets should (where applicable) match one or
# more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*,
# more of the tupples: *-*-netbsdelf*, *-*-netbsdaout*,
# *-*-netbsdecoff* and *-*-netbsd*. For targets that recently
# switched to ELF, *-*-netbsd* would select the old
# object file format. This provides both forward
@ -169,7 +170,7 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
arm*|i386|m68k|ns32k|sh3*|sparc|vax)
eval $set_cc_for_build
if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
| grep -q __ELF__
| grep __ELF__ >/dev/null
then
# Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout).
# Return netbsd for either. FIX?
@ -179,7 +180,7 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
fi
;;
*)
os=netbsd
os=netbsd
;;
esac
# The OS release
@ -222,7 +223,7 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'`
;;
*5.*)
UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'`
UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'`
;;
esac
# According to Compaq, /usr/sbin/psrinfo has been available on
@ -268,10 +269,7 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
# A Xn.n version is an unreleased experimental baselevel.
# 1.2 uses "1.2" for uname -r.
echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
# Reset EXIT trap before exiting to avoid spurious non-zero exit code.
exitcode=$?
trap '' 0
exit $exitcode ;;
exit ;;
Alpha\ *:Windows_NT*:*)
# How do we know it's Interix rather than the generic POSIX subsystem?
# Should we change UNAME_MACHINE based on the output of uname instead
@ -297,7 +295,7 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
echo s390-ibm-zvmoe
exit ;;
*:OS400:*:*)
echo powerpc-ibm-os400
echo powerpc-ibm-os400
exit ;;
arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
echo arm-acorn-riscix${UNAME_RELEASE}
@ -326,33 +324,14 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
case `/usr/bin/uname -p` in
sparc) echo sparc-icl-nx7; exit ;;
esac ;;
s390x:SunOS:*:*)
echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
exit ;;
sun4H:SunOS:5.*:*)
echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
exit ;;
sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
exit ;;
i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*)
echo i386-pc-auroraux${UNAME_RELEASE}
exit ;;
i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*)
eval $set_cc_for_build
SUN_ARCH="i386"
# If there is a compiler, see if it is configured for 64-bit objects.
# Note that the Sun cc does not turn __LP64__ into 1 like gcc does.
# This test works for both compilers.
if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \
(CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
grep IS_64BIT_ARCH >/dev/null
then
SUN_ARCH="x86_64"
fi
fi
echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
echo i386-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
exit ;;
sun4*:SunOS:6*:*)
# According to config.sub, this is the proper way to canonicalize
@ -396,23 +375,23 @@ case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
# MiNT. But MiNT is downward compatible to TOS, so this should
# be no problem.
atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
echo m68k-atari-mint${UNAME_RELEASE}
echo m68k-atari-mint${UNAME_RELEASE}
exit ;;
atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*)
echo m68k-atari-mint${UNAME_RELEASE}
exit ;;
exit ;;
*falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
echo m68k-atari-mint${UNAME_RELEASE}
echo m68k-atari-mint${UNAME_RELEASE}
exit ;;
milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*)
echo m68k-milan-mint${UNAME_RELEASE}
exit ;;
echo m68k-milan-mint${UNAME_RELEASE}
exit ;;
hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*)
echo m68k-hades-mint${UNAME_RELEASE}
exit ;;
echo m68k-hades-mint${UNAME_RELEASE}
exit ;;
*:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
echo m68k-unknown-mint${UNAME_RELEASE}
exit ;;
echo m68k-unknown-mint${UNAME_RELEASE}
exit ;;
m68k:machten:*:*)
echo m68k-apple-machten${UNAME_RELEASE}
exit ;;
@ -482,8 +461,8 @@ EOF
echo m88k-motorola-sysv3
exit ;;
AViiON:dgux:*:*)
# DG/UX returns AViiON for all architectures
UNAME_PROCESSOR=`/usr/bin/uname -p`
# DG/UX returns AViiON for all architectures
UNAME_PROCESSOR=`/usr/bin/uname -p`
if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ]
then
if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \
@ -496,7 +475,7 @@ EOF
else
echo i586-dg-dgux${UNAME_RELEASE}
fi
exit ;;
exit ;;
M88*:DolphinOS:*:*) # DolphinOS (SVR3)
echo m88k-dolphin-sysv3
exit ;;
@ -553,7 +532,7 @@ EOF
echo rs6000-ibm-aix3.2
fi
exit ;;
*:AIX:*:[4567])
*:AIX:*:[456])
IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then
IBM_ARCH=rs6000
@ -596,52 +575,52 @@ EOF
9000/[678][0-9][0-9])
if [ -x /usr/bin/getconf ]; then
sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
case "${sc_cpu_version}" in
523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
532) # CPU_PA_RISC2_0
case "${sc_kernel_bits}" in
32) HP_ARCH="hppa2.0n" ;;
64) HP_ARCH="hppa2.0w" ;;
sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
case "${sc_cpu_version}" in
523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
532) # CPU_PA_RISC2_0
case "${sc_kernel_bits}" in
32) HP_ARCH="hppa2.0n" ;;
64) HP_ARCH="hppa2.0w" ;;
'') HP_ARCH="hppa2.0" ;; # HP-UX 10.20
esac ;;
esac
esac ;;
esac
fi
if [ "${HP_ARCH}" = "" ]; then
eval $set_cc_for_build
sed 's/^ //' << EOF >$dummy.c
sed 's/^ //' << EOF >$dummy.c
#define _HPUX_SOURCE
#include <stdlib.h>
#include <unistd.h>
#define _HPUX_SOURCE
#include <stdlib.h>
#include <unistd.h>
int main ()
{
#if defined(_SC_KERNEL_BITS)
long bits = sysconf(_SC_KERNEL_BITS);
#endif
long cpu = sysconf (_SC_CPU_VERSION);
int main ()
{
#if defined(_SC_KERNEL_BITS)
long bits = sysconf(_SC_KERNEL_BITS);
#endif
long cpu = sysconf (_SC_CPU_VERSION);
switch (cpu)
{
case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
case CPU_PA_RISC2_0:
#if defined(_SC_KERNEL_BITS)
switch (bits)
{
case 64: puts ("hppa2.0w"); break;
case 32: puts ("hppa2.0n"); break;
default: puts ("hppa2.0"); break;
} break;
#else /* !defined(_SC_KERNEL_BITS) */
puts ("hppa2.0"); break;
#endif
default: puts ("hppa1.0"); break;
}
exit (0);
}
switch (cpu)
{
case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
case CPU_PA_RISC2_0:
#if defined(_SC_KERNEL_BITS)
switch (bits)
{
case 64: puts ("hppa2.0w"); break;
case 32: puts ("hppa2.0n"); break;
default: puts ("hppa2.0"); break;
} break;
#else /* !defined(_SC_KERNEL_BITS) */
puts ("hppa2.0"); break;
#endif
default: puts ("hppa1.0"); break;
}
exit (0);
}
EOF
(CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy`
test -z "$HP_ARCH" && HP_ARCH=hppa
@ -661,7 +640,7 @@ EOF
# => hppa64-hp-hpux11.23
if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) |
grep -q __LP64__
grep __LP64__ >/dev/null
then
HP_ARCH="hppa2.0w"
else
@ -732,22 +711,22 @@ EOF
exit ;;
C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*)
echo c1-convex-bsd
exit ;;
exit ;;
C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*)
if getsysinfo -f scalar_acc
then echo c32-convex-bsd
else echo c2-convex-bsd
fi
exit ;;
exit ;;
C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*)
echo c34-convex-bsd
exit ;;
exit ;;
C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*)
echo c38-convex-bsd
exit ;;
exit ;;
C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*)
echo c4-convex-bsd
exit ;;
exit ;;
CRAY*Y-MP:*:*:*)
echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
exit ;;
@ -771,14 +750,14 @@ EOF
exit ;;
F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
exit ;;
FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
exit ;;
5000:UNIX_System_V:4.*:*)
FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'`
echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'`
echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
exit ;;
i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE}
@ -790,12 +769,13 @@ EOF
echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
exit ;;
*:FreeBSD:*:*)
UNAME_PROCESSOR=`/usr/bin/uname -p`
case ${UNAME_PROCESSOR} in
case ${UNAME_MACHINE} in
pc98)
echo i386-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
amd64)
echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
*)
echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
echo ${UNAME_MACHINE}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
esac
exit ;;
i*:CYGWIN*:*)
@ -804,22 +784,19 @@ EOF
*:MINGW*:*)
echo ${UNAME_MACHINE}-pc-mingw32
exit ;;
i*:MSYS*:*)
echo ${UNAME_MACHINE}-pc-msys
exit ;;
i*:windows32*:*)
# uname -m includes "-pc" on this system.
echo ${UNAME_MACHINE}-mingw32
# uname -m includes "-pc" on this system.
echo ${UNAME_MACHINE}-mingw32
exit ;;
i*:PW*:*)
echo ${UNAME_MACHINE}-pc-pw32
exit ;;
*:Interix*:*)
case ${UNAME_MACHINE} in
*:Interix*:[3456]*)
case ${UNAME_MACHINE} in
x86)
echo i586-pc-interix${UNAME_RELEASE}
exit ;;
authenticamd | genuineintel | EM64T)
EM64T | authenticamd)
echo x86_64-unknown-interix${UNAME_RELEASE}
exit ;;
IA64)
@ -829,9 +806,6 @@ EOF
[345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*)
echo i${UNAME_MACHINE}-pc-mks
exit ;;
8664:Windows_NT:*)
echo x86_64-pc-mks
exit ;;
i*:Windows_NT*:* | Pentium*:Windows_NT*:*)
# How do we know it's Interix rather than the generic POSIX subsystem?
# It also conflicts with pre-2.0 versions of AT&T UWIN. Should we
@ -861,27 +835,6 @@ EOF
i*86:Minix:*:*)
echo ${UNAME_MACHINE}-pc-minix
exit ;;
aarch64:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-gnu
exit ;;
aarch64_be:Linux:*:*)
UNAME_MACHINE=aarch64_be
echo ${UNAME_MACHINE}-unknown-linux-gnu
exit ;;
alpha:Linux:*:*)
case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
EV5) UNAME_MACHINE=alphaev5 ;;
EV56) UNAME_MACHINE=alphaev56 ;;
PCA56) UNAME_MACHINE=alphapca56 ;;
PCA57) UNAME_MACHINE=alphapca56 ;;
EV6) UNAME_MACHINE=alphaev6 ;;
EV67) UNAME_MACHINE=alphaev67 ;;
EV68*) UNAME_MACHINE=alphaev68 ;;
esac
objdump --private-headers /bin/sh | grep -q ld.so.1
if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi
echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC}
exit ;;
arm*:Linux:*:*)
eval $set_cc_for_build
if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \
@ -889,40 +842,20 @@ EOF
then
echo ${UNAME_MACHINE}-unknown-linux-gnu
else
if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \
| grep -q __ARM_PCS_VFP
then
echo ${UNAME_MACHINE}-unknown-linux-gnueabi
else
echo ${UNAME_MACHINE}-unknown-linux-gnueabihf
fi
echo ${UNAME_MACHINE}-unknown-linux-gnueabi
fi
exit ;;
avr32*:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-gnu
exit ;;
cris:Linux:*:*)
echo ${UNAME_MACHINE}-axis-linux-gnu
echo cris-axis-linux-gnu
exit ;;
crisv32:Linux:*:*)
echo ${UNAME_MACHINE}-axis-linux-gnu
echo crisv32-axis-linux-gnu
exit ;;
frv:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-gnu
exit ;;
hexagon:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-gnu
exit ;;
i*86:Linux:*:*)
LIBC=gnu
eval $set_cc_for_build
sed 's/^ //' << EOF >$dummy.c
#ifdef __dietlibc__
LIBC=dietlibc
#endif
EOF
eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'`
echo "${UNAME_MACHINE}-pc-linux-${LIBC}"
echo frv-unknown-linux-gnu
exit ;;
ia64:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-gnu
@ -933,33 +866,74 @@ EOF
m68*:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-gnu
exit ;;
mips:Linux:*:* | mips64:Linux:*:*)
mips:Linux:*:*)
eval $set_cc_for_build
sed 's/^ //' << EOF >$dummy.c
#undef CPU
#undef ${UNAME_MACHINE}
#undef ${UNAME_MACHINE}el
#undef mips
#undef mipsel
#if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
CPU=${UNAME_MACHINE}el
CPU=mipsel
#else
#if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
CPU=${UNAME_MACHINE}
CPU=mips
#else
CPU=
#endif
#endif
EOF
eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'`
eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n '
/^CPU/{
s: ::g
p
}'`"
test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
;;
mips64:Linux:*:*)
eval $set_cc_for_build
sed 's/^ //' << EOF >$dummy.c
#undef CPU
#undef mips64
#undef mips64el
#if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
CPU=mips64el
#else
#if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
CPU=mips64
#else
CPU=
#endif
#endif
EOF
eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n '
/^CPU/{
s: ::g
p
}'`"
test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
;;
or32:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-gnu
echo or32-unknown-linux-gnu
exit ;;
padre:Linux:*:*)
echo sparc-unknown-linux-gnu
ppc:Linux:*:*)
echo powerpc-unknown-linux-gnu
exit ;;
parisc64:Linux:*:* | hppa64:Linux:*:*)
echo hppa64-unknown-linux-gnu
ppc64:Linux:*:*)
echo powerpc64-unknown-linux-gnu
exit ;;
alpha:Linux:*:*)
case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
EV5) UNAME_MACHINE=alphaev5 ;;
EV56) UNAME_MACHINE=alphaev56 ;;
PCA56) UNAME_MACHINE=alphapca56 ;;
PCA57) UNAME_MACHINE=alphapca56 ;;
EV6) UNAME_MACHINE=alphaev6 ;;
EV67) UNAME_MACHINE=alphaev67 ;;
EV68*) UNAME_MACHINE=alphaev68 ;;
esac
objdump --private-headers /bin/sh | grep ld.so.1 >/dev/null
if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi
echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC}
exit ;;
parisc:Linux:*:* | hppa:Linux:*:*)
# Look for CPU level
@ -969,17 +943,14 @@ EOF
*) echo hppa-unknown-linux-gnu ;;
esac
exit ;;
ppc64:Linux:*:*)
echo powerpc64-unknown-linux-gnu
exit ;;
ppc:Linux:*:*)
echo powerpc-unknown-linux-gnu
parisc64:Linux:*:* | hppa64:Linux:*:*)
echo hppa64-unknown-linux-gnu
exit ;;
s390:Linux:*:* | s390x:Linux:*:*)
echo ${UNAME_MACHINE}-ibm-linux
exit ;;
sh64*:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-gnu
echo ${UNAME_MACHINE}-unknown-linux-gnu
exit ;;
sh*:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-gnu
@ -987,18 +958,78 @@ EOF
sparc:Linux:*:* | sparc64:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-gnu
exit ;;
tile*:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-gnu
exit ;;
vax:Linux:*:*)
echo ${UNAME_MACHINE}-dec-linux-gnu
exit ;;
x86_64:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-gnu
echo x86_64-unknown-linux-gnu
exit ;;
xtensa*:Linux:*:*)
echo ${UNAME_MACHINE}-unknown-linux-gnu
echo ${UNAME_MACHINE}-unknown-linux-gnu
exit ;;
i*86:Linux:*:*)
# The BFD linker knows what the default object file format is, so
# first see if it will tell us. cd to the root directory to prevent
# problems with other programs or directories called `ld' in the path.
# Set LC_ALL=C to ensure ld outputs messages in English.
ld_supported_targets=`cd /; LC_ALL=C ld --help 2>&1 \
| sed -ne '/supported targets:/!d
s/[ ][ ]*/ /g
s/.*supported targets: *//
s/ .*//
p'`
case "$ld_supported_targets" in
elf32-i386)
TENTATIVE="${UNAME_MACHINE}-pc-linux-gnu"
;;
a.out-i386-linux)
echo "${UNAME_MACHINE}-pc-linux-gnuaout"
exit ;;
coff-i386)
echo "${UNAME_MACHINE}-pc-linux-gnucoff"
exit ;;
"")
# Either a pre-BFD a.out linker (linux-gnuoldld) or
# one that does not give us useful --help.
echo "${UNAME_MACHINE}-pc-linux-gnuoldld"
exit ;;
esac
# Determine whether the default compiler is a.out or elf
eval $set_cc_for_build
sed 's/^ //' << EOF >$dummy.c
#include <features.h>
#ifdef __ELF__
# ifdef __GLIBC__
# if __GLIBC__ >= 2
LIBC=gnu
# else
LIBC=gnulibc1
# endif
# else
LIBC=gnulibc1
# endif
#else
#if defined(__INTEL_COMPILER) || defined(__PGI) || defined(__SUNPRO_C) || defined(__SUNPRO_CC)
LIBC=gnu
#else
LIBC=gnuaout
#endif
#endif
#ifdef __dietlibc__
LIBC=dietlibc
#endif
EOF
eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n '
/^LIBC/{
s: ::g
p
}'`"
test x"${LIBC}" != x && {
echo "${UNAME_MACHINE}-pc-linux-${LIBC}"
exit
}
test x"${TENTATIVE}" != x && { echo "${TENTATIVE}"; exit; }
;;
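# Worked example of the probe above on a hypothetical i686 glibc system:
# `ld --help` reports "supported targets: elf32-i386 ...", so TENTATIVE is
# set to i686-pc-linux-gnu; the <features.h> test then preprocesses to
# LIBC=gnu, and the script prints i686-pc-linux-gnu.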
i*86:DYNIX/ptx:4*:*)
# ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
# earlier versions are messed up and put the nodename in both
@ -1006,11 +1037,11 @@ EOF
echo i386-sequent-sysv4
exit ;;
i*86:UNIX_SV:4.2MP:2.*)
# Unixware is an offshoot of SVR4, but it has its own version
# number series starting with 2...
# I am not positive that other SVR4 systems won't match this,
# Unixware is an offshoot of SVR4, but it has its own version
# number series starting with 2...
# I am not positive that other SVR4 systems won't match this,
# I just have to hope. -- rms.
# Use sysv4.2uw... so that sysv4* matches it.
# Use sysv4.2uw... so that sysv4* matches it.
echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION}
exit ;;
i*86:OS/2:*:*)
@ -1027,7 +1058,7 @@ EOF
i*86:syllable:*:*)
echo ${UNAME_MACHINE}-pc-syllable
exit ;;
i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*)
i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.0*:*)
echo i386-unknown-lynxos${UNAME_RELEASE}
exit ;;
i*86:*DOS:*:*)
@ -1042,7 +1073,7 @@ EOF
fi
exit ;;
i*86:*:5:[678]*)
# UnixWare 7.x, OpenUNIX and OpenServer 6.
# UnixWare 7.x, OpenUNIX and OpenServer 6.
case `/bin/uname -X | grep "^Machine"` in
*486*) UNAME_MACHINE=i486 ;;
*Pentium) UNAME_MACHINE=i586 ;;
@ -1070,13 +1101,10 @@ EOF
exit ;;
pc:*:*:*)
# Left here for compatibility:
# uname -m prints for DJGPP always 'pc', but it prints nothing about
# the processor, so we play safe by assuming i586.
# Note: whatever this is, it MUST be the same as what config.sub
# prints for the "djgpp" host, or else GDB configury will decide that
# this is a cross-build.
echo i586-pc-msdosdjgpp
exit ;;
# uname -m prints for DJGPP always 'pc', but it prints nothing about
# the processor, so we play safe by assuming i386.
echo i386-pc-msdosdjgpp
exit ;;
Intel:Mach:3*:*)
echo i386-pc-mach3
exit ;;
@ -1111,18 +1139,8 @@ EOF
/bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
&& { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
/bin/uname -p 2>/dev/null | grep 86 >/dev/null \
&& { echo i486-ncr-sysv4; exit; } ;;
NCR*:*:4.2:* | MPRAS*:*:4.2:*)
OS_REL='.3'
test -r /etc/.relid \
&& OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
/bin/uname -p 2>/dev/null | grep 86 >/dev/null \
&& { echo i486-ncr-sysv4.3${OS_REL}; exit; }
/bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
&& { echo i586-ncr-sysv4.3${OS_REL}; exit; }
/bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \
&& { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
/bin/uname -p 2>/dev/null | grep 86 >/dev/null \
&& { echo i486-ncr-sysv4; exit; } ;;
m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
echo m68k-unknown-lynxos${UNAME_RELEASE}
exit ;;
@ -1135,7 +1153,7 @@ EOF
rs6000:LynxOS:2.*:*)
echo rs6000-unknown-lynxos${UNAME_RELEASE}
exit ;;
PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*)
PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.0*:*)
echo powerpc-unknown-lynxos${UNAME_RELEASE}
exit ;;
SM[BE]S:UNIX_SV:*:*)
@ -1155,10 +1173,10 @@ EOF
echo ns32k-sni-sysv
fi
exit ;;
PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort
# says <Richard.M.Bartel@ccMail.Census.GOV>
echo i586-unisys-sysv4
exit ;;
PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort
# says <Richard.M.Bartel@ccMail.Census.GOV>
echo i586-unisys-sysv4
exit ;;
*:UNIX_System_V:4*:FTX*)
# From Gerald Hewes <hewes@openmarket.com>.
# How about differentiating between stratus architectures? -djm
@ -1184,11 +1202,11 @@ EOF
exit ;;
R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
if [ -d /usr/nec ]; then
echo mips-nec-sysv${UNAME_RELEASE}
echo mips-nec-sysv${UNAME_RELEASE}
else
echo mips-unknown-sysv${UNAME_RELEASE}
echo mips-unknown-sysv${UNAME_RELEASE}
fi
exit ;;
exit ;;
BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only.
echo powerpc-be-beos
exit ;;
@ -1198,9 +1216,6 @@ EOF
BePC:BeOS:*:*) # BeOS running on Intel PC compatible.
echo i586-pc-beos
exit ;;
BePC:Haiku:*:*) # Haiku running on Intel PC compatible.
echo i586-pc-haiku
exit ;;
SX-4:SUPER-UX:*:*)
echo sx4-nec-superux${UNAME_RELEASE}
exit ;;
@ -1228,16 +1243,6 @@ EOF
*:Darwin:*:*)
UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown
case $UNAME_PROCESSOR in
i386)
eval $set_cc_for_build
if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then
if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
(CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \
grep IS_64BIT_ARCH >/dev/null
then
UNAME_PROCESSOR="x86_64"
fi
fi ;;
unknown) UNAME_PROCESSOR=powerpc ;;
esac
echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE}
@ -1253,9 +1258,6 @@ EOF
*:QNX:*:4*)
echo i386-pc-qnx
exit ;;
NEO-?:NONSTOP_KERNEL:*:*)
echo neo-tandem-nsk${UNAME_RELEASE}
exit ;;
NSE-?:NONSTOP_KERNEL:*:*)
echo nse-tandem-nsk${UNAME_RELEASE}
exit ;;
@ -1301,13 +1303,13 @@ EOF
echo pdp10-unknown-its
exit ;;
SEI:*:*:SEIUX)
echo mips-sei-seiux${UNAME_RELEASE}
echo mips-sei-seiux${UNAME_RELEASE}
exit ;;
*:DragonFly:*:*)
echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`
exit ;;
*:*VMS:*:*)
UNAME_MACHINE=`(uname -p) 2>/dev/null`
UNAME_MACHINE=`(uname -p) 2>/dev/null`
case "${UNAME_MACHINE}" in
A*) echo alpha-dec-vms ; exit ;;
I*) echo ia64-dec-vms ; exit ;;
@ -1322,12 +1324,6 @@ EOF
i*86:rdos:*:*)
echo ${UNAME_MACHINE}-pc-rdos
exit ;;
i*86:AROS:*:*)
echo ${UNAME_MACHINE}-pc-aros
exit ;;
x86_64:VMkernel:*:*)
echo ${UNAME_MACHINE}-unknown-esx
exit ;;
esac
#echo '(No uname command or uname output not recognized.)' 1>&2
@ -1350,11 +1346,11 @@ main ()
#include <sys/param.h>
printf ("m68k-sony-newsos%s\n",
#ifdef NEWSOS4
"4"
"4"
#else
""
""
#endif
); exit (0);
); exit (0);
#endif
#endif

View File

@ -1,10 +1,10 @@
#! /bin/sh
# Configuration validation subroutine script.
# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012 Free Software Foundation, Inc.
# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
# Free Software Foundation, Inc.
timestamp='2012-04-18'
timestamp='2008-01-16'
# This file is (in principle) common to ALL GNU software.
# The presence of a machine in this file suggests that SOME GNU software
@ -21,7 +21,9 @@ timestamp='2012-04-18'
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
# 02110-1301, USA.
#
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
@ -30,16 +32,13 @@ timestamp='2012-04-18'
# Please send patches to <config-patches@gnu.org>. Submit a context
# diff and a properly formatted GNU ChangeLog entry.
# diff and a properly formatted ChangeLog entry.
#
# Configuration subroutine to validate and canonicalize a configuration type.
# Supply the specified configuration type as an argument.
# If it is invalid, we print an error message on stderr and exit with code 1.
# Otherwise, we print the canonical config type on stdout and succeed.
# You can get the latest version of this script from:
# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD
# This file is supposed to be the same for all GNU packages
# and recognize all the CPU types, system types and aliases
# that are meaningful with *any* GNU software.
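# Typical use (illustrative): pass one alias, read the canonical triple:
#   $ sh ./config.sub i686-linux
#   i686-pc-linux-gnu
#   $ sh ./config.sub sun4
#   sparc-sun-sunos4.1.1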
@ -73,9 +72,8 @@ Report bugs and patches to <config-patches@gnu.org>."
version="\
GNU config.sub ($timestamp)
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012
Free Software Foundation, Inc.
Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
@ -122,18 +120,12 @@ esac
# Here we must recognize all the valid KERNEL-OS combinations.
maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
case $maybe_os in
nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \
linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \
knetbsd*-gnu* | netbsd*-gnu* | \
kopensolaris*-gnu* | \
nto-qnx* | linux-gnu* | linux-dietlibc | linux-newlib* | linux-uclibc* | \
uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* | \
storm-chaos* | os2-emx* | rtmk-nova*)
os=-$maybe_os
basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
;;
android-linux)
os=-linux-android
basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown
;;
*)
basic_machine=`echo $1 | sed 's/-[^-]*$//'`
if [ $basic_machine != $1 ]
@ -156,13 +148,10 @@ case $os in
-convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
-c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
-harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
-apple | -axis | -knuth | -cray | -microblaze)
-apple | -axis | -knuth | -cray)
os=
basic_machine=$1
;;
-bluegene*)
os=-cnk
;;
-sim | -cisco | -oki | -wec | -winbond)
os=
basic_machine=$1
@ -177,10 +166,10 @@ case $os in
os=-chorusos
basic_machine=$1
;;
-chorusrdb)
os=-chorusrdb
-chorusrdb)
os=-chorusrdb
basic_machine=$1
;;
;;
-hiux*)
os=-hiuxwe2
;;
@ -225,12 +214,6 @@ case $os in
-isc*)
basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
;;
-lynx*178)
os=-lynxos178
;;
-lynx*5)
os=-lynxos5
;;
-lynx*)
os=-lynxos
;;
@ -255,32 +238,24 @@ case $basic_machine in
# Some are omitted here because they have special meanings below.
1750a | 580 \
| a29k \
| aarch64 | aarch64_be \
| alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
| alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
| am33_2.0 \
| arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \
| be32 | be64 \
| bfin \
| c4x | clipper \
| d10v | d30v | dlx | dsp16xx \
| epiphany \
| fido | fr30 | frv \
| h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
| hexagon \
| i370 | i860 | i960 | ia64 \
| ip2k | iq2000 \
| le32 | le64 \
| lm32 \
| m32c | m32r | m32rle | m68000 | m68k | m88k \
| maxq | mb | microblaze | mcore | mep | metag \
| maxq | mb | microblaze | mcore | mep \
| mips | mipsbe | mipseb | mipsel | mipsle \
| mips16 \
| mips64 | mips64el \
| mips64octeon | mips64octeonel \
| mips64orion | mips64orionel \
| mips64r5900 | mips64r5900el \
| mips64vr | mips64vrel \
| mips64orion | mips64orionel \
| mips64vr4100 | mips64vr4100el \
| mips64vr4300 | mips64vr4300el \
| mips64vr5000 | mips64vr5000el \
@ -293,42 +268,29 @@ case $basic_machine in
| mipsisa64sr71k | mipsisa64sr71kel \
| mipstx39 | mipstx39el \
| mn10200 | mn10300 \
| moxie \
| mt \
| msp430 \
| nds32 | nds32le | nds32be \
| nios | nios2 \
| ns16k | ns32k \
| open8 \
| or32 \
| pdp10 | pdp11 | pj | pjl \
| powerpc | powerpc64 | powerpc64le | powerpcle \
| powerpc | powerpc64 | powerpc64le | powerpcle | ppcbe \
| pyramid \
| rl78 | rx \
| score \
| sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
| sh | sh[1234] | sh[24]a | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
| sh64 | sh64le \
| sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \
| sparcv8 | sparcv9 | sparcv9b | sparcv9v \
| spu \
| tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \
| ubicom32 \
| v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \
| spu | strongarm \
| tahoe | thumb | tic4x | tic80 | tron \
| v850 | v850e \
| we32k \
| x86 | xc16x | xstormy16 | xtensa \
| z8k | z80)
| x86 | xc16x | xscale | xscalee[bl] | xstormy16 | xtensa \
| z8k)
basic_machine=$basic_machine-unknown
;;
c54x)
basic_machine=tic54x-unknown
;;
c55x)
basic_machine=tic55x-unknown
;;
c6x)
basic_machine=tic6x-unknown
;;
m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | picochip)
m6811 | m68hc11 | m6812 | m68hc12)
# Motorola 68HC11/12.
basic_machine=$basic_machine-unknown
os=-none
;;
@ -338,21 +300,6 @@ case $basic_machine in
basic_machine=mt-unknown
;;
strongarm | thumb | xscale)
basic_machine=arm-unknown
;;
xgate)
basic_machine=$basic_machine-unknown
os=-none
;;
xscaleeb)
basic_machine=armeb-unknown
;;
xscaleel)
basic_machine=armel-unknown
;;
# We use `pc' rather than `unknown'
# because (1) that's what they normally are, and
# (2) the word "unknown" tends to confuse beginning users.
@ -367,36 +314,29 @@ case $basic_machine in
# Recognize the basic CPU types with company name.
580-* \
| a29k-* \
| aarch64-* | aarch64_be-* \
| alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
| alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
| alphapca5[67]-* | alpha64pca5[67]-* | arc-* \
| arm-* | armbe-* | armle-* | armeb-* | armv*-* \
| avr-* | avr32-* \
| be32-* | be64-* \
| bfin-* | bs2000-* \
| c[123]* | c30-* | [cjt]90-* | c4x-* \
| c[123]* | c30-* | [cjt]90-* | c4x-* | c54x-* | c55x-* | c6x-* \
| clipper-* | craynv-* | cydra-* \
| d10v-* | d30v-* | dlx-* \
| elxsi-* \
| f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \
| h8300-* | h8500-* \
| hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
| hexagon-* \
| i*86-* | i860-* | i960-* | ia64-* \
| ip2k-* | iq2000-* \
| le32-* | le64-* \
| lm32-* \
| m32c-* | m32r-* | m32rle-* \
| m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
| m88110-* | m88k-* | maxq-* | mcore-* | metag-* | microblaze-* \
| m88110-* | m88k-* | maxq-* | mcore-* \
| mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
| mips16-* \
| mips64-* | mips64el-* \
| mips64octeon-* | mips64octeonel-* \
| mips64orion-* | mips64orionel-* \
| mips64r5900-* | mips64r5900el-* \
| mips64vr-* | mips64vrel-* \
| mips64orion-* | mips64orionel-* \
| mips64vr4100-* | mips64vr4100el-* \
| mips64vr4300-* | mips64vr4300el-* \
| mips64vr5000-* | mips64vr5000el-* \
@ -411,32 +351,27 @@ case $basic_machine in
| mmix-* \
| mt-* \
| msp430-* \
| nds32-* | nds32le-* | nds32be-* \
| nios-* | nios2-* \
| none-* | np1-* | ns16k-* | ns32k-* \
| open8-* \
| orion-* \
| pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
| powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \
| powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* | ppcbe-* \
| pyramid-* \
| rl78-* | romp-* | rs6000-* | rx-* \
| sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
| romp-* | rs6000-* \
| sh-* | sh[1234]-* | sh[24]a-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
| shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
| sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \
| sparclite-* \
| sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \
| tahoe-* \
| sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | strongarm-* | sv1-* | sx?-* \
| tahoe-* | thumb-* \
| tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \
| tile*-* \
| tron-* \
| ubicom32-* \
| v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \
| vax-* \
| v850-* | v850e-* | vax-* \
| we32k-* \
| x86-* | x86_64-* | xc16x-* | xps100-* \
| x86-* | x86_64-* | xc16x-* | xps100-* | xscale-* | xscalee[bl]-* \
| xstormy16-* | xtensa*-* \
| ymp-* \
| z8k-* | z80-*)
| z8k-*)
;;
# Recognize the basic CPU types without company name, with glob match.
xtensa*)
@ -458,7 +393,7 @@ case $basic_machine in
basic_machine=a29k-amd
os=-udi
;;
abacus)
abacus)
basic_machine=abacus-unknown
;;
adobe68k)
@ -504,10 +439,6 @@ case $basic_machine in
basic_machine=m68k-apollo
os=-bsd
;;
aros)
basic_machine=i386-pc
os=-aros
;;
aux)
basic_machine=m68k-apple
os=-aux
@ -524,27 +455,10 @@ case $basic_machine in
basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'`
os=-linux
;;
bluegene*)
basic_machine=powerpc-ibm
os=-cnk
;;
c54x-*)
basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'`
;;
c55x-*)
basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'`
;;
c6x-*)
basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'`
;;
c90)
basic_machine=c90-cray
os=-unicos
;;
cegcc)
basic_machine=arm-unknown
os=-cegcc
;;
convex-c1)
basic_machine=c1-convex
os=-bsd
@ -573,7 +487,7 @@ case $basic_machine in
basic_machine=craynv-cray
os=-unicosmp
;;
cr16 | cr16-*)
cr16)
basic_machine=cr16-unknown
os=-elf
;;
@ -612,10 +526,6 @@ case $basic_machine in
basic_machine=m88k-motorola
os=-sysv3
;;
dicos)
basic_machine=i686-pc
os=-dicos
;;
djgpp)
basic_machine=i586-pc
os=-msdosdjgpp
@ -731,6 +641,7 @@ case $basic_machine in
i370-ibm* | ibm*)
basic_machine=i370-ibm
;;
# I'm not sure what "Sysv32" means. Should this be sysv3.2?
i*86v32)
basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
os=-sysv32
@ -788,9 +699,6 @@ case $basic_machine in
basic_machine=ns32k-utek
os=-sysv
;;
microblaze)
basic_machine=microblaze-xilinx
;;
mingw32)
basic_machine=i386-pc
os=-mingw32
@ -827,18 +735,10 @@ case $basic_machine in
ms1-*)
basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'`
;;
msys)
basic_machine=i386-pc
os=-msys
;;
mvs)
basic_machine=i370-ibm
os=-mvs
;;
nacl)
basic_machine=le32-unknown
os=-nacl
;;
ncr3000)
basic_machine=i486-ncr
os=-sysv4
@ -903,12 +803,6 @@ case $basic_machine in
np1)
basic_machine=np1-gould
;;
neo-tandem)
basic_machine=neo-tandem
;;
nse-tandem)
basic_machine=nse-tandem
;;
nsr-tandem)
basic_machine=nsr-tandem
;;
@ -991,10 +885,9 @@ case $basic_machine in
;;
power) basic_machine=power-ibm
;;
ppc | ppcbe) basic_machine=powerpc-unknown
ppc) basic_machine=powerpc-unknown
;;
ppc-* | ppcbe-*)
basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
ppc-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
;;
ppcle | powerpclittle | ppc-le | powerpc-little)
basic_machine=powerpcle-unknown
@ -1088,9 +981,6 @@ case $basic_machine in
basic_machine=i860-stratus
os=-sysv4
;;
strongarm-* | thumb-*)
basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'`
;;
sun2)
basic_machine=m68000-sun
;;
@ -1147,8 +1037,20 @@ case $basic_machine in
basic_machine=t90-cray
os=-unicos
;;
tic54x | c54x*)
basic_machine=tic54x-unknown
os=-coff
;;
tic55x | c55x*)
basic_machine=tic55x-unknown
os=-coff
;;
tic6x | c6x*)
basic_machine=tic6x-unknown
os=-coff
;;
tile*)
basic_machine=$basic_machine-unknown
basic_machine=tile-unknown
os=-linux-gnu
;;
tx39)
@ -1218,9 +1120,6 @@ case $basic_machine in
xps | xps100)
basic_machine=xps100-honeywell
;;
xscale-* | xscalee[bl]-*)
basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'`
;;
ymp)
basic_machine=ymp-cray
os=-unicos
@ -1229,10 +1128,6 @@ case $basic_machine in
basic_machine=z8k-unknown
os=-sim
;;
z80-*-coff)
basic_machine=z80-unknown
os=-sim
;;
none)
basic_machine=none-none
os=-none
@ -1271,7 +1166,7 @@ case $basic_machine in
we32k)
basic_machine=we32k-att
;;
sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele)
sh[1234] | sh[24]a | sh[34]eb | sh[1234]le | sh[23]ele)
basic_machine=sh-unknown
;;
sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v)
@ -1318,12 +1213,9 @@ esac
if [ x"$os" != x"" ]
then
case $os in
# First match some system type aliases
# that might get confused with valid system types.
# First match some system type aliases
# that might get confused with valid system types.
# -solaris* is a basic system type, with this one exception.
-auroraux)
os=-auroraux
;;
-solaris1 | -solaris1.*)
os=`echo $os | sed -e 's|solaris1|sunos4|'`
;;
@ -1344,11 +1236,10 @@ case $os in
# Each alternative MUST END IN A *, to match a version number.
# -sysv* is not here because it comes later, after sysvr4.
-gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
| -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\
| -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \
| -sym* | -kopensolaris* \
| -*vms* | -sco* | -esix* | -isc* | -aix* | -sunos | -sunos[34]*\
| -hpux* | -unos* | -osf* | -luna* | -dgux* | -solaris* | -sym* \
| -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
| -aos* | -aros* \
| -aos* \
| -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
| -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
| -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
@ -1357,10 +1248,9 @@ case $os in
| -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
| -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
| -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
| -chorusos* | -chorusrdb* | -cegcc* \
| -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
| -mingw32* | -linux-gnu* | -linux-android* \
| -linux-newlib* | -linux-uclibc* \
| -chorusos* | -chorusrdb* \
| -cygwin* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
| -mingw32* | -linux-gnu* | -linux-newlib* | -linux-uclibc* \
| -uxpv* | -beos* | -mpeix* | -udk* \
| -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \
| -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
@ -1368,7 +1258,7 @@ case $os in
| -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
| -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \
| -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \
| -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es*)
| -skyos* | -haiku* | -rdos* | -toppers* | -drops*)
# Remember, each alternative MUST END IN *, to match a version number.
;;
-qnx*)
@ -1407,7 +1297,7 @@ case $os in
-opened*)
os=-openedition
;;
-os400*)
-os400*)
os=-os400
;;
-wince*)
@ -1456,7 +1346,7 @@ case $os in
-sinix*)
os=-sysv4
;;
-tpf*)
-tpf*)
os=-tpf
;;
-triton*)
@ -1498,11 +1388,6 @@ case $os in
-zvmoe)
os=-zvmoe
;;
-dicos*)
os=-dicos
;;
-nacl*)
;;
-none)
;;
*)
@ -1525,10 +1410,10 @@ else
# system, and we'll never get to this point.
case $basic_machine in
score-*)
score-*)
os=-elf
;;
spu-*)
spu-*)
os=-elf
;;
*-acorn)
@ -1540,20 +1425,8 @@ case $basic_machine in
arm*-semi)
os=-aout
;;
c4x-* | tic4x-*)
os=-coff
;;
hexagon-*)
os=-elf
;;
tic54x-*)
os=-coff
;;
tic55x-*)
os=-coff
;;
tic6x-*)
os=-coff
c4x-* | tic4x-*)
os=-coff
;;
# This must come before the *-dec entry.
pdp10-*)
@ -1573,11 +1446,14 @@ case $basic_machine in
;;
m68000-sun)
os=-sunos3
# This also exists in the configure program, but was not the
# default.
# os=-sunos4
;;
m68*-cisco)
os=-aout
;;
mep-*)
mep-*)
os=-elf
;;
mips*-cisco)
@ -1604,7 +1480,7 @@ case $basic_machine in
*-ibm)
os=-aix
;;
*-knuth)
*-knuth)
os=-mmixware
;;
*-wec)
@ -1709,7 +1585,7 @@ case $basic_machine in
-sunos*)
vendor=sun
;;
-cnk*|-aix*)
-aix*)
vendor=ibm
;;
-beos*)

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -6,7 +6,7 @@
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) 2001 - 2012, Daniel Stenberg, <daniel@haxx.se>, et al.
# Copyright (C) 2001 - 2008, Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
@ -19,12 +19,15 @@
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
# $Id: curl-config.in,v 1.35 2008-09-03 19:10:48 danf Exp $
###########################################################################
#
# The idea to this kind of setup info script was stolen from numerous
# other packages, such as neon, libxml and gnome.
#
prefix=@prefix@
exec_prefix=@exec_prefix@
includedir=@includedir@
cppflag_curl_staticlib=@CPPFLAG_CURL_STATICLIB@
usage()
{
@ -33,12 +36,10 @@ Usage: curl-config [OPTION]
Available values for OPTION include:
--built-shared says 'yes' if libcurl was built shared
--ca ca bundle install path
--cc compiler
--cflags pre-processor and compiler flags
--checkfor [version] check for (lib)curl of the specified version
--configure the arguments given to configure when building curl
--features newline separated list of enabled features
--help display this help and exit
--libs library linking information
@ -66,10 +67,6 @@ while test $# -gt 0; do
esac
case "$1" in
--built-shared)
echo @ENABLE_SHARED@
;;
--ca)
echo "@CURL_CA_BUNDLE@"
;;
@ -93,9 +90,8 @@ while test $# -gt 0; do
echo "$protocol"
done
;;
--version)
echo libcurl @CURLVERSION@
echo libcurl @VERSION@
exit 0
;;
@ -114,7 +110,7 @@ while test $# -gt 0; do
# silent success
exit 0
else
echo "requested version $checkfor is newer than existing @CURLVERSION@"
echo "requested version $checkfor is newer than existing @VERSION@"
exit 1
fi
;;
@ -129,15 +125,10 @@ while test $# -gt 0; do
;;
--cflags)
if test "X$cppflag_curl_staticlib" = "X-DCURL_STATICLIB"; then
CPPFLAG_CURL_STATICLIB="-DCURL_STATICLIB "
else
CPPFLAG_CURL_STATICLIB=""
fi
if test "X@includedir@" = "X/usr/include"; then
echo "$CPPFLAG_CURL_STATICLIB"
echo ""
else
echo "${CPPFLAG_CURL_STATICLIB}-I@includedir@"
echo "-I@includedir@"
fi
;;
@ -148,20 +139,16 @@ while test $# -gt 0; do
CURLLIBDIR=""
fi
if test "X@REQUIRE_LIB_DEPS@" = "Xyes"; then
echo ${CURLLIBDIR}-lcurl @LIBCURL_LIBS@
echo ${CURLLIBDIR}-lcurl @LDFLAGS@ @LIBCURL_LIBS@ @LIBS@
else
echo ${CURLLIBDIR}-lcurl
echo ${CURLLIBDIR}-lcurl @LDFLAGS@ @LIBS@
fi
;;
--static-libs)
echo @libdir@/libcurl.@libext@ @LDFLAGS@ @LIBCURL_LIBS@
echo @libdir@/libcurl.@libext@ @LDFLAGS@ @LIBCURL_LIBS@ @LIBS@
;;
--configure)
echo @CONFIGURE_OPTIONS@
;;
*)
echo "unknown option: $1"
usage 1
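# Illustrative session with the installed script (all values depend on how
# libcurl was configured; the flags below are hypothetical):
#   $ curl-config --version
#   libcurl 7.x.x
#   $ curl-config --cflags
#   -I/usr/local/include
#   $ curl-config --libs
#   -L/usr/local/lib -lcurl -lssl -lcrypto -lz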

View File

@ -1,4 +1,5 @@
;;;; Emacs Lisp help for writing curl code. ;;;;
;;;; $Id: curl-style.el,v 1.13 2004/10/06 07:50:18 bagder Exp $
;;; The curl hacker's C conventions.
;;; See the sample.emacs file on how this file can be made to take
@ -34,7 +35,7 @@
(setq tab-width 8
indent-tabs-mode nil ; Use spaces. Not tabs.
comment-column 40
c-font-lock-extra-types (append '("bool" "CURL" "CURLcode" "ssize_t" "size_t" "curl_socklen_t" "fd_set" "time_t" "curl_off_t" "curl_socket_t" "in_addr_t" "CURLSHcode" "CURLMcode" "Curl_addrinfo"))
c-font-lock-extra-types (append '("bool" "CURL" "CURLcode" "ssize_t" "size_t" "socklen_t" "fd_set" "time_t" "curl_off_t" "curl_socket_t" "in_addr_t" "CURLSHcode" "CURLMcode" "Curl_addrinfo"))
)
;; keybindings for C, C++, and Objective-C. We can put these in
;; c-mode-base-map because of inheritance ...

View File

@ -1,10 +1,9 @@
#! /bin/sh
# depcomp - compile a program generating dependencies as side-effects
scriptversion=2012-03-27.16; # UTC
scriptversion=2005-07-09.11
# Copyright (C) 1999, 2000, 2003, 2004, 2005, 2006, 2007, 2009, 2010,
# 2011, 2012 Free Software Foundation, Inc.
# Copyright (C) 1999, 2000, 2003, 2004, 2005 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@ -17,7 +16,9 @@ scriptversion=2012-03-27.16; # UTC
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
@ -28,7 +29,7 @@ scriptversion=2012-03-27.16; # UTC
case $1 in
'')
echo "$0: No command. Try '$0 --help' for more information." 1>&2
echo "$0: No command. Try \`$0 --help' for more information." 1>&2
exit 1;
;;
-h | --h*)
@ -40,11 +41,11 @@ as side-effects.
Environment variables:
depmode Dependency tracking mode.
source Source file read by 'PROGRAMS ARGS'.
object Object file output by 'PROGRAMS ARGS'.
source Source file read by `PROGRAMS ARGS'.
object Object file output by `PROGRAMS ARGS'.
DEPDIR directory where to store dependencies.
depfile Dependency file to output.
tmpdepfile Temporary file to use when outputting dependencies.
tmpdepfile Temporary file to use when outputing dependencies.
libtool Whether libtool is used (yes/no).
Report bugs to <bug-automake@gnu.org>.
@ -57,12 +58,6 @@ EOF
;;
esac
# A tabulation character.
tab=' '
# A newline character.
nl='
'
if test -z "$depmode" || test -z "$source" || test -z "$object"; then
echo "depcomp: Variables source, object and depmode must be set" 1>&2
exit 1
@ -91,48 +86,12 @@ if test "$depmode" = dashXmstdout; then
depmode=dashmstdout
fi
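# How automake-generated rules typically invoke this script (names are
# illustrative; depfile/tmpdepfile may also be derived from $object):
#   depmode=gcc3 source=foo.c object=foo.o \
#   depfile=.deps/foo.Po tmpdepfile=.deps/foo.TPo \
#   ./depcomp gcc -c -o foo.o foo.c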
cygpath_u="cygpath -u -f -"
if test "$depmode" = msvcmsys; then
# This is just like msvisualcpp but w/o cygpath translation.
# Just convert the backslash-escaped backslashes to single forward
# slashes to satisfy depend.m4
cygpath_u='sed s,\\\\,/,g'
depmode=msvisualcpp
fi
if test "$depmode" = msvc7msys; then
# This is just like msvc7 but w/o cygpath translation.
# Just convert the backslash-escaped backslashes to single forward
# slashes to satisfy depend.m4
cygpath_u='sed s,\\\\,/,g'
depmode=msvc7
fi
if test "$depmode" = xlc; then
# IBM C/C++ Compilers xlc/xlC can output gcc-like dependency informations.
gccflag=-qmakedep=gcc,-MF
depmode=gcc
fi
case "$depmode" in
gcc3)
## gcc 3 implements dependency tracking that does exactly what
## we want. Yay! Note: for some reason libtool 1.4 doesn't like
## it if -MD -MP comes after the -MF stuff. Hmm.
## Unfortunately, FreeBSD c89 acceptance of flags depends upon
## the command line argument order; so add the flags where they
## appear in depend2.am. Note that the slowdown incurred here
## affects only configure: in makefiles, %FASTDEP% shortcuts this.
for arg
do
case $arg in
-c) set fnord "$@" -MT "$object" -MD -MP -MF "$tmpdepfile" "$arg" ;;
*) set fnord "$@" "$arg" ;;
esac
shift # fnord
shift # $arg
done
"$@"
"$@" -MT "$object" -MD -MP -MF "$tmpdepfile"
stat=$?
if test $stat -eq 0; then :
else
@ -168,21 +127,20 @@ gcc)
## The second -e expression handles DOS-style file names with drive letters.
sed -e 's/^[^:]*: / /' \
-e 's/^['$alpha']:\/[^:]*: / /' < "$tmpdepfile" >> "$depfile"
## This next piece of magic avoids the "deleted header file" problem.
## This next piece of magic avoids the `deleted header file' problem.
## The problem is that when a header file which appears in a .P file
## is deleted, the dependency causes make to die (because there is
## typically no way to rebuild the header). We avoid this by adding
## dummy dependencies for each header file. Too bad gcc doesn't do
## this for us directly.
tr ' ' "$nl" < "$tmpdepfile" |
## Some versions of gcc put a space before the ':'. On the theory
tr ' ' '
' < "$tmpdepfile" |
## Some versions of gcc put a space before the `:'. On the theory
## that the space means something, we add a space to the output as
## well. hp depmode also adds that space, but also prefixes the VPATH
## to the object. Take care to not repeat it in the output.
## well.
## Some versions of the HPUX 10.20 sed can't process this invocation
## correctly. Breaking it into two sed invocations is a workaround.
sed -e 's/^\\$//' -e '/^$/d' -e "s|.*$object$||" -e '/:$/d' \
| sed -e 's/$/ :/' >> "$depfile"
sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile"
rm -f "$tmpdepfile"
;;
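# Effect of the tr/sed pipeline above on a hypothetical dependency file
#   foo.o: foo.c foo.h bar.h
# is to append one dummy target per prerequisite to "$depfile":
#   foo.c :
#   foo.h :
#   bar.h :
# so make no longer dies when foo.h or bar.h is later deleted.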
@ -214,17 +172,20 @@ sgi)
# clever and replace this with sed code, as IRIX sed won't handle
# lines with more than a fixed number of characters (4096 in
# IRIX 6.2 sed, 8192 in IRIX 6.5). We also remove comment lines;
# the IRIX cc adds comments like '#:fec' to the end of the
# the IRIX cc adds comments like `#:fec' to the end of the
# dependency line.
tr ' ' "$nl" < "$tmpdepfile" \
tr ' ' '
' < "$tmpdepfile" \
| sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' | \
tr "$nl" ' ' >> "$depfile"
echo >> "$depfile"
tr '
' ' ' >> $depfile
echo >> $depfile
# The second pass generates a dummy entry for each header file.
tr ' ' "$nl" < "$tmpdepfile" \
tr ' ' '
' < "$tmpdepfile" \
| sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' -e 's/$/:/' \
>> "$depfile"
>> $depfile
else
# The sourcefile does not contain any dependencies, so just
# store a dummy comment line, to avoid errors with the Makefile
@ -234,51 +195,40 @@ sgi)
rm -f "$tmpdepfile"
;;
xlc)
# This case exists only to let depend.m4 do its work. It works by
# looking at the text of this script. This case will never be run,
# since it is checked for above.
exit 1
;;
aix)
# The C for AIX Compiler uses -M and outputs the dependencies
# in a .u file. In older versions, this file always lives in the
# current directory. Also, the AIX compiler puts '$object:' at the
# current directory. Also, the AIX compiler puts `$object:' at the
# start of each line; $object doesn't have directory information.
# Version 6 uses the directory in both cases.
dir=`echo "$object" | sed -e 's|/[^/]*$|/|'`
test "x$dir" = "x$object" && dir=
base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'`
stripped=`echo "$object" | sed 's/\(.*\)\..*$/\1/'`
tmpdepfile="$stripped.u"
if test "$libtool" = yes; then
tmpdepfile1=$dir$base.u
tmpdepfile2=$base.u
tmpdepfile3=$dir.libs/$base.u
"$@" -Wc,-M
else
tmpdepfile1=$dir$base.u
tmpdepfile2=$dir$base.u
tmpdepfile3=$dir$base.u
"$@" -M
fi
stat=$?
if test -f "$tmpdepfile"; then :
else
stripped=`echo "$stripped" | sed 's,^.*/,,'`
tmpdepfile="$stripped.u"
fi
if test $stat -eq 0; then :
else
rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3"
rm -f "$tmpdepfile"
exit $stat
fi
for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3"
do
test -f "$tmpdepfile" && break
done
if test -f "$tmpdepfile"; then
# Each line is of the form 'foo.o: dependent.h'.
outname="$stripped.o"
# Each line is of the form `foo.o: dependent.h'.
# Do two passes, one to just change these to
# '$object: dependent.h' and one to simply 'dependent.h:'.
sed -e "s,^.*\.[a-z]*:,$object:," < "$tmpdepfile" > "$depfile"
sed -e 's,^.*\.[a-z]*:['"$tab"' ]*,,' -e 's,$,:,' < "$tmpdepfile" >> "$depfile"
# `$object: dependent.h' and one to simply `dependent.h:'.
sed -e "s,^$outname:,$object :," < "$tmpdepfile" > "$depfile"
sed -e "s,^$outname: \(.*\)$,\1:," < "$tmpdepfile" >> "$depfile"
else
# The sourcefile does not contain any dependencies, so just
# store a dummy comment line, to avoid errors with the Makefile
@ -289,26 +239,23 @@ aix)
;;
icc)
# Intel's C compiler anf tcc (Tiny C Compiler) understand '-MD -MF file'.
# However on
# $CC -MD -MF foo.d -c -o sub/foo.o sub/foo.c
# Intel's C compiler understands `-MD -MF file'. However on
# icc -MD -MF foo.d -c -o sub/foo.o sub/foo.c
# ICC 7.0 will fill foo.d with something like
# foo.o: sub/foo.c
# foo.o: sub/foo.h
# which is wrong. We want
# which is wrong. We want:
# sub/foo.o: sub/foo.c
# sub/foo.o: sub/foo.h
# sub/foo.c:
# sub/foo.h:
# ICC 7.1 will output
# foo.o: sub/foo.c sub/foo.h
# and will wrap long lines using '\':
# and will wrap long lines using \ :
# foo.o: sub/foo.c ... \
# sub/foo.h ... \
# ...
# tcc 0.9.26 (FIXME still under development at the moment of writing)
# will emit a similar output, but also prepend the continuation lines
# with horizontal tabulation characters.
"$@" -MD -MF "$tmpdepfile"
stat=$?
if test $stat -eq 0; then :
@ -317,74 +264,23 @@ icc)
exit $stat
fi
rm -f "$depfile"
# Each line is of the form 'foo.o: dependent.h',
# or 'foo.o: dep1.h dep2.h \', or ' dep3.h dep4.h \'.
# Each line is of the form `foo.o: dependent.h',
# or `foo.o: dep1.h dep2.h \', or ` dep3.h dep4.h \'.
# Do two passes, one to just change these to
# '$object: dependent.h' and one to simply 'dependent.h:'.
sed -e "s/^[ $tab][ $tab]*/ /" -e "s,^[^:]*:,$object :," \
< "$tmpdepfile" > "$depfile"
sed '
s/[ '"$tab"'][ '"$tab"']*/ /g
s/^ *//
s/ *\\*$//
s/^[^:]*: *//
/^$/d
/:$/d
s/$/ :/
' < "$tmpdepfile" >> "$depfile"
# `$object: dependent.h' and one to simply `dependent.h:'.
sed "s,^[^:]*:,$object :," < "$tmpdepfile" > "$depfile"
# Some versions of the HPUX 10.20 sed can't process this invocation
# correctly. Breaking it into two sed invocations is a workaround.
sed 's,^[^:]*: \(.*\)$,\1,;s/^\\$//;/^$/d;/:$/d' < "$tmpdepfile" |
sed -e 's/$/ :/' >> "$depfile"
rm -f "$tmpdepfile"
;;
hp2)
# The "hp" stanza above does not work with aCC (C++) and HP's ia64
# compilers, which have integrated preprocessors. The correct option
# to use with these is +Maked; it writes dependencies to a file named
# 'foo.d', which lands next to the object file, wherever that
# happens to be.
# Much of this is similar to the tru64 case; see comments there.
dir=`echo "$object" | sed -e 's|/[^/]*$|/|'`
test "x$dir" = "x$object" && dir=
base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'`
if test "$libtool" = yes; then
tmpdepfile1=$dir$base.d
tmpdepfile2=$dir.libs/$base.d
"$@" -Wc,+Maked
else
tmpdepfile1=$dir$base.d
tmpdepfile2=$dir$base.d
"$@" +Maked
fi
stat=$?
if test $stat -eq 0; then :
else
rm -f "$tmpdepfile1" "$tmpdepfile2"
exit $stat
fi
for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2"
do
test -f "$tmpdepfile" && break
done
if test -f "$tmpdepfile"; then
sed -e "s,^.*\.[a-z]*:,$object:," "$tmpdepfile" > "$depfile"
# Add 'dependent.h:' lines.
sed -ne '2,${
s/^ *//
s/ \\*$//
s/$/:/
p
}' "$tmpdepfile" >> "$depfile"
else
echo "#dummy" > "$depfile"
fi
rm -f "$tmpdepfile" "$tmpdepfile2"
;;
tru64)
# The Tru64 compiler uses -MD to generate dependencies as a side
# effect. 'cc -MD -o foo.o ...' puts the dependencies into 'foo.o.d'.
# effect. `cc -MD -o foo.o ...' puts the dependencies into `foo.o.d'.
# At least on Alpha/Redhat 6.1, Compaq CCC V6.2-504 seems to put
# dependencies in 'foo.d' instead, so we check for that too.
# dependencies in `foo.d' instead, so we check for that too.
# Subdirectories are respected.
dir=`echo "$object" | sed -e 's|/[^/]*$|/|'`
test "x$dir" = "x$object" && dir=
@ -392,13 +288,13 @@ tru64)
if test "$libtool" = yes; then
# With Tru64 cc, shared objects can also be used to make a
# static library. This mechanism is used in libtool 1.4 series to
# static library. This mecanism is used in libtool 1.4 series to
# handle both shared and static libraries in a single compilation.
# With libtool 1.4, dependencies were output in $dir.libs/$base.lo.d.
#
# With libtool 1.5 this exception was removed, and libtool now
# generates 2 separate objects for the 2 libraries. These two
# compilations output dependencies in $dir.libs/$base.o.d and
# compilations output dependencies in in $dir.libs/$base.o.d and
# in $dir$base.o.d. We have to check for both files, because
# one of the two compilations can be disabled. We should prefer
# $dir$base.o.d over $dir.libs/$base.o.d because the latter is
@ -430,59 +326,14 @@ tru64)
done
if test -f "$tmpdepfile"; then
sed -e "s,^.*\.[a-z]*:,$object:," < "$tmpdepfile" > "$depfile"
sed -e 's,^.*\.[a-z]*:['"$tab"' ]*,,' -e 's,$,:,' < "$tmpdepfile" >> "$depfile"
# That's a tab and a space in the [].
sed -e 's,^.*\.[a-z]*:[ ]*,,' -e 's,$,:,' < "$tmpdepfile" >> "$depfile"
else
echo "#dummy" > "$depfile"
fi
rm -f "$tmpdepfile"
;;
msvc7)
if test "$libtool" = yes; then
showIncludes=-Wc,-showIncludes
else
showIncludes=-showIncludes
fi
"$@" $showIncludes > "$tmpdepfile"
stat=$?
grep -v '^Note: including file: ' "$tmpdepfile"
if test "$stat" = 0; then :
else
rm -f "$tmpdepfile"
exit $stat
fi
rm -f "$depfile"
echo "$object : \\" > "$depfile"
# The first sed program below extracts the file names and escapes
# backslashes for cygpath. The second sed program outputs the file
# name when reading, but also accumulates all include files in the
# hold buffer in order to output them again at the end. This only
# works with sed implementations that can handle large buffers.
sed < "$tmpdepfile" -n '
/^Note: including file: *\(.*\)/ {
s//\1/
s/\\/\\\\/g
p
}' | $cygpath_u | sort -u | sed -n '
s/ /\\ /g
s/\(.*\)/'"$tab"'\1 \\/p
s/.\(.*\) \\/\1:/
H
$ {
s/.*/'"$tab"'/
G
p
}' >> "$depfile"
rm -f "$tmpdepfile"
;;
msvc7msys)
# This case exists only to let depend.m4 do its work. It works by
# looking at the text of this script. This case will never be run,
# since it is checked for above.
exit 1
;;
#nosideeffect)
# This comment above is used by automake to tell side-effect
# dependency tracking mechanisms from slower ones.
@ -494,13 +345,13 @@ dashmstdout)
# Remove the call to Libtool.
if test "$libtool" = yes; then
while test "X$1" != 'X--mode=compile'; do
while test $1 != '--mode=compile'; do
shift
done
shift
fi
# Remove '-o $object'.
# Remove `-o $object'.
IFS=" "
for arg
do
@ -520,14 +371,15 @@ dashmstdout)
done
test -z "$dashmflag" && dashmflag=-M
# Require at least two characters before searching for ':'
# Require at least two characters before searching for `:'
# in the target name. This is to cope with DOS-style filenames:
# a dependency such as 'c:/foo/bar' could be seen as target 'c' otherwise.
# a dependency such as `c:/foo/bar' could be seen as target `c' otherwise.
"$@" $dashmflag |
sed 's:^['"$tab"' ]*[^:'"$tab"' ][^:][^:]*\:['"$tab"' ]*:'"$object"'\: :' > "$tmpdepfile"
sed 's:^[ ]*[^: ][^:][^:]*\:[ ]*:'"$object"'\: :' > "$tmpdepfile"
rm -f "$depfile"
cat < "$tmpdepfile" > "$depfile"
tr ' ' "$nl" < "$tmpdepfile" | \
tr ' ' '
' < "$tmpdepfile" | \
## Some versions of the HPUX 10.20 sed can't process this invocation
## correctly. Breaking it into two sed invocations is a workaround.
sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile"
@ -544,46 +396,38 @@ makedepend)
"$@" || exit $?
# Remove any Libtool call
if test "$libtool" = yes; then
while test "X$1" != 'X--mode=compile'; do
while test $1 != '--mode=compile'; do
shift
done
shift
fi
# X makedepend
shift
cleared=no eat=no
for arg
do
cleared=no
for arg in "$@"; do
case $cleared in
no)
set ""; shift
cleared=yes ;;
esac
if test $eat = yes; then
eat=no
continue
fi
case "$arg" in
-D*|-I*)
set fnord "$@" "$arg"; shift ;;
# Strip any option that makedepend may not understand. Remove
# the object too, otherwise makedepend will parse it as a source file.
-arch)
eat=yes ;;
-*|$object)
;;
*)
set fnord "$@" "$arg"; shift ;;
esac
done
obj_suffix=`echo "$object" | sed 's/^.*\././'`
obj_suffix="`echo $object | sed 's/^.*\././'`"
touch "$tmpdepfile"
${MAKEDEPEND-makedepend} -o"$obj_suffix" -f"$tmpdepfile" "$@"
rm -f "$depfile"
# makedepend may prepend the VPATH from the source file name to the object.
# No need to regex-escape $object, excess matching of '.' is harmless.
sed "s|^.*\($object *:\)|\1|" "$tmpdepfile" > "$depfile"
sed '1,2d' "$tmpdepfile" | tr ' ' "$nl" | \
cat < "$tmpdepfile" > "$depfile"
sed '1,2d' "$tmpdepfile" | tr ' ' '
' | \
## Some versions of the HPUX 10.20 sed can't process this invocation
## correctly. Breaking it into two sed invocations is a workaround.
sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile"
@ -597,13 +441,13 @@ cpp)
# Remove the call to Libtool.
if test "$libtool" = yes; then
while test "X$1" != 'X--mode=compile'; do
while test $1 != '--mode=compile'; do
shift
done
shift
fi
# Remove '-o $object'.
# Remove `-o $object'.
IFS=" "
for arg
do
@ -635,27 +479,13 @@ cpp)
msvisualcpp)
# Important note: in order to support this mode, a compiler *must*
# always write the preprocessed file to stdout.
# always write the preprocessed file to stdout, regardless of -o,
# because we must use -o when running libtool.
"$@" || exit $?
# Remove the call to Libtool.
if test "$libtool" = yes; then
while test "X$1" != 'X--mode=compile'; do
shift
done
shift
fi
IFS=" "
for arg
do
case "$arg" in
-o)
shift
;;
$object)
shift
;;
"-Gm"|"/Gm"|"-Gi"|"/Gi"|"-ZI"|"/ZI")
set fnord "$@"
shift
@ -668,23 +498,16 @@ msvisualcpp)
;;
esac
done
"$@" -E 2>/dev/null |
sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::\1:p' | $cygpath_u | sort -u > "$tmpdepfile"
"$@" -E |
sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::echo "`cygpath -u \\"\1\\"`":p' | sort | uniq > "$tmpdepfile"
rm -f "$depfile"
echo "$object : \\" > "$depfile"
sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::'"$tab"'\1 \\:p' >> "$depfile"
echo "$tab" >> "$depfile"
sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::\1\::p' >> "$depfile"
. "$tmpdepfile" | sed 's% %\\ %g' | sed -n '/^\(.*\)$/ s:: \1 \\:p' >> "$depfile"
echo " " >> "$depfile"
. "$tmpdepfile" | sed 's% %\\ %g' | sed -n '/^\(.*\)$/ s::\1\::p' >> "$depfile"
rm -f "$tmpdepfile"
;;
msvcmsys)
# This case exists only to let depend.m4 do its work. It works by
# looking at the text of this script. This case will never be run,
# since it is checked for above.
exit 1
;;
none)
exec "$@"
;;
@ -703,6 +526,5 @@ exit 0
# eval: (add-hook 'write-file-hooks 'time-stamp)
# time-stamp-start: "scriptversion="
# time-stamp-format: "%:y-%02m-%02d.%02H"
# time-stamp-time-zone: "UTC"
# time-stamp-end: "; # UTC"
# time-stamp-end: "$"
# End:

View File

@ -32,7 +32,7 @@ C
C++
Written by Jean-Philippe Barrette-LaPierre
http://curlpp.org/
http://rrette.com/textpattern/index.php?s=cURLpp
Ch
@ -54,19 +54,11 @@ Dylan
Written by Chris Double
http://dylanlibs.sourceforge.net/
Eiffel
Written by Eiffel Software
http://curl.haxx.se/libcurl/eiffel/
Euphoria
Written by Ray Smith
http://rays-web.com/eulibcurl.htm
Falcon
http://www.falconpl.org/index.ftd?page_id=prjs&prj_id=curl
Ferite
Written by Paul Querna

View File

@ -1,61 +1,45 @@
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
| (__| |_| | _ <| |___
$Id: BUGS,v 1.8 2007-07-19 21:35:36 bagder Exp $
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
| (__| |_| | _ <| |___
\___|\___/|_| \_\_____|
BUGS
1. Bugs
1.1 There are still bugs
1.2 Where to report
1.3 What to report
1.4 libcurl problems
1.5 Who will fix the problems
1.6 How to get a stack trace
1.7 Bugs in libcurl bindings
==============================================================================
1.1 There are still bugs
Curl and libcurl have grown substantially since the beginning. At the time
of writing (January 2013), there are about 83,000 lines of source code, and
by the time you read this it has probably grown even more.
of writing (July 2007), there are about 47000 lines of source code, and by
the time you read this it has probably grown even more.
Of course there are lots of bugs left. And lots of misfeatures.
To help us make curl the stable and solid product we want it to be, we need
bug reports and bug fixes.
1.2 Where to report
WHERE TO REPORT
If you can't fix a bug yourself and submit a fix for it, try to report an as
detailed report as possible to a curl mailing list to allow one of us to
have a go at a solution. You can optionally also post your bug/problem at
curl's bug tracking system over at
have a go at a solution. You should also post your bug/problem at curl's bug
tracking system over at
https://sourceforge.net/p/curl/bugs/
http://sourceforge.net/bugs/?group_id=976
Please read the rest of this document below first before doing that! Also,
you need to login to your sourceforge account before being able to submit a
bug report (necessary evil done to avoid spam).
(but please read the sections below first before doing that)
If you feel you need to ask around first, find a suitable mailing list and
post there. The lists are available on http://curl.haxx.se/mail/
1.3 What to report
WHAT TO REPORT
When reporting a bug, you should include all information that will help us
understand what's wrong, what you expected to happen and how to repeat the
bad behavior. You therefore need to tell us:
- your operating system's name and version number
- your operating system's name and version number (uname -a under a unix
is fine)
- what version of curl you're using (curl -V is fine)
- versions of the used libraries that libcurl is built to use
- what URL you were working with (if possible), at least which protocol
and anything and everything else you think matters. Tell us what you
@ -76,48 +60,7 @@ BUGS
The address and how to subscribe to the mailing lists are detailed in the
MANUAL file.
1.4 libcurl problems
First, post all libcurl problems on the curl-library mailing list.
When you've written your own application with libcurl to perform transfers,
it is even more important to be specific and detailed when reporting bugs.
Tell us the libcurl version and your operating system. Tell us the name and
version of all relevant sub-components like for example the SSL library
you're using and what name resolving your libcurl uses. If you use SFTP or
SCP, the libssh2 version is relevant etc.
Showing us a real source code example repeating your problem is the best way
to get our attention and it will greatly increase our chances to understand
your problem and to work on a fix (if we agree it truly is a problem).
Lots of problems that appear to be libcurl problems are actually just abuses
of the libcurl API or other malfunctions in your applications. It is advised
that you run your problematic program using a memory debug tool like
valgrind or similar before you post memory-related or "crashing" problems to
us.
1.5 Who will fix the problems
If the problems or bugs you describe are considered to be bugs, we want to
have the problems fixed.
There are no developers in the curl project that are paid to work on bugs.
All developers that take on reported bugs do this on a voluntary basis. We
do it out of an ambition to keep curl and libcurl excellent products and out
of pride.
But please do not assume that you can just lump over something to us and it
will then magically be fixed after some given time. Most often we need
feedback and help to understand what you've experienced and how to repeat a
problem. Then we may only be able to assist YOU to debug the problem and to
track down the proper fix.
We get reports from many people every month and each report can take a
considerable amount of time to really go to the bottom with.
1.6 How to get a stack trace
HOW TO GET A STACK TRACE
First, you must make sure that you compile all sources with -g and that you
don't 'strip' the final executable. Try to avoid optimizing the code as
@ -137,12 +80,3 @@ BUGS
crashed. Include the stack trace with your detailed bug report. It'll help a
lot.
1.7 Bugs in libcurl bindings
There will of course pop up bugs in libcurl bindings. You should then
primarily approach the team that works on that particular binding and see
what you can do to help them fix the problem.
If you suspect that the problem exists in the underlying libcurl, then
please convert your program over to plain C and follow the steps outlined
above.

View File

@ -28,13 +28,10 @@
2.10 Document
2.11 Test Cases
3. Pushing Out Your Changes
3.1 Write Access to git Repository
3.2 How To Make a Patch with git
3.3 How To Make a Patch without git
3.4 How to get your changes into the main sources
3.5 Write good commit messages
3.6 Please don't send pull requests
3. Pushing Out Your Changes
3.1 Write Access to CVS Repository
3.2 How To Make a Patch
3.3 How to get your changes into the main sources
==============================================================================
@ -117,7 +114,7 @@
2.4 Line Lengths
We write source lines shorter than 80 columns.
We try to keep source lines shorter than 80 columns.
2.5 General Style
@ -155,7 +152,7 @@
Please try to get the latest available sources to make your patches
against. It makes the life of the developers so much easier. The very best is
if you get the most up-to-date sources from the git repository, but the
if you get the most up-to-date sources from the CVS repository, but the
latest release archive is quite OK as well!
2.10 Document
@ -178,43 +175,18 @@
test case that verifies that it works as documented. If every submitter also
posts a few test cases, it won't end up as a heavy burden on a single person!
3. Pushing Out Your Changes
3. Pushing Out Your Changes
3.1 Write Access to git Repository
3.1 Write Access to CVS Repository
If you are a frequent contributor, or have another good reason, you can of
course get write access to the git repository and then you'll be able to push
your changes straight into the git repo instead of sending changes by mail as
patches. Just ask if this is what you'd want. You will be required to have
posted a few quality patches first, before you can be granted push access.
course get write access to the CVS repository and then you'll be able to
check-in all your changes straight into the CVS tree instead of sending all
changes by mail as patches. Just ask if this is what you'd want. You will be
required to have posted a few quality patches first, before you can be
granted write access.
3.2 How To Make a Patch with git
You need to first checkout the repository:
git clone git://github.com/bagder/curl.git
You then proceed and edit all the files you like and you commit them to your
local repository:
git commit [file]
As usual, group your commits so that each commit contains all changes that
together constitute a logical change. See also section "3.5 Write good commit
messages".
Once you have done all your commits and you're happy with what you see, you
can make patches out of your changes that are suitable for mailing:
git format-patch remotes/origin/master
This creates files in your local directory named NNNN-[name].patch for each
commit.
Now send those patches off to the curl-library list. You can of course opt to
do that with the 'git send-email' command.
3.3 How To Make a Patch without git
3.2 How To Make a Patch
Keep a copy of the unmodified curl sources. Make your changes in a separate
source tree. When you think you have something that you want to offer the
@ -234,70 +206,28 @@
For unix-like operating systems:
http://www.gnu.org/software/patch/patch.html
http://www.gnu.org/directory/diffutils.html
http://www.gnu.org/software/patch/patch.html
http://www.gnu.org/directory/diffutils.html
For Windows:
http://gnuwin32.sourceforge.net/packages/patch.htm
http://gnuwin32.sourceforge.net/packages/diffutils.htm
http://gnuwin32.sourceforge.net/packages/patch.htm
http://gnuwin32.sourceforge.net/packages/diffutils.htm
3.4 How to get your changes into the main sources
3.3 How to get your changes into the main sources
Submit your patch to the curl-library mailing list.
1. Submit your patch to the curl-library mailing list
Make the patch against as recent sources as possible.
2. Make the patch against as recent sources as possible.
Make sure your patch adheres to the source indent and coding style of already
existing source code. Failing to do so just adds more work for me.
3. Make sure your patch adheres to the source indent and coding style of
already existing source code. Failing to do so just adds more work for me.
Respond to replies on the list about the patch and answer questions and/or
fix nits/flaws. This is very important. I will take lack of replies as a sign
that you're not very anxious to get your patch accepted and I tend to simply
drop such patches from my TODO list.
4. Respond to replies on the list about the patch and answer questions and/or
fix nits/flaws. This is very important. I will take lack of replies as a
sign that you're not very anxious to get your patch accepted and I tend to
simply drop such patches from my TODO list.
If you've followed the above paragraphs and your patch still hasn't been
incorporated after some weeks, consider resubmitting it to the list.
3.5 Write good commit messages
A short guide to how to do fine commit messages in the curl project.
---- start ----
[area]: [short line describing the main effect]
[separate the above single line from the rest with an empty line]
[full description, no wider than 72 columns that describe as much as
possible as to why this change is made, and possibly what things
it fixes and everything else that is related]
---- stop ----
Don't forget to use commit --author="" if you commit someone else's work,
and make sure that you have your own user and email setup correctly in git
before you commit
3.6 Please don't send pull requests
With git (and especially github) it is easy and tempting to send a pull
request to one or more people in the curl project to have changes merged this
way instead of mailing patches to the curl-library mailing list.
We don't like that. We want them mailed for these reasons:
- Peer review. Anyone and everyone on the list can review, comment and
improve on the patch. Pull requests limit this ability.
- Anyone can merge the patch into their own trees for testing and those who
have push rights can push it to the main repo. It doesn't have to be anyone
the patch author knows beforehand.
- Commit messages can be tweaked and changed if merged locally instead of
using github. Merges directly on github requires the changes to be perfect
already, which they seldom are.
- Merges on github prevents rebases and even enforces --no-ff which is a git
style we don't otherwise use in the project
However: once patches have been reviewed and deemed fine on list they are
perfectly OK to be pulled from a published git tree.
5. If you've followed the above mentioned paragraphs and your patch still
hasn't been incorporated after some weeks, consider resubmitting it to the
list.

View File

@ -11,7 +11,7 @@ Condition
This document reflects my view and understanding of these things. Please tell
me where and how you think I'm wrong, and I'll try to correct my mistakes.
Background
Background
The Free Software Foundation has deemed the Original BSD license[1] to be
"incompatible"[2] with GPL[3]. I'd rather say it is the other way around, but

View File

@ -1,3 +1,4 @@
Updated: Sep 24, 2008 (http://curl.haxx.se/docs/faq.html)
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
@ -18,9 +19,6 @@ FAQ
1.9 Where do I buy commercial support for curl?
1.10 How many are using curl?
1.11 Why don't you update ca-bundle.crt
1.12 I have a problem who can I chat with?
1.13 curl's ECCN number?
1.14 How do I submit my patch?
2. Install Related Problems
2.1 configure doesn't find OpenSSL even when it is installed
@ -28,14 +26,14 @@ FAQ
2.1.2 only the libssl lib is missing
2.2 Does curl work/build with other SSL libraries?
2.3 Where can I find a copy of LIBEAY32.DLL?
2.4 Does curl support SOCKS (RFC 1928) ?
2.4 Does curl support Socks (RFC 1928) ?
3. Usage Problems
3.1 curl: (1) SSL is disabled, https: not supported
3.2 How do I tell curl to resume a transfer?
3.3 Why doesn't my posting using -F work?
3.4 How do I tell curl to run custom FTP commands?
3.5 How can I disable the Accept: */* header?
3.5 How can I disable the Pragma: nocache header?
3.6 Does curl support ASP, XML, XHTML or HTML version Y?
3.7 Can I use curl to delete/rename a file through FTP?
3.8 How do I tell curl to follow HTTP redirects?
@ -44,15 +42,11 @@ FAQ
3.11 How do I POST with a different Content-Type?
3.12 Why do FTP specific features over HTTP proxy fail?
3.13 Why does my single/double quotes fail?
3.14 Does curl support Javascript or PAC (automated proxy config)?
3.14 Does curl support javascript or pac (automated proxy config)?
3.15 Can I do recursive fetches with curl?
3.16 What certificates do I need when I use SSL?
3.17 How do I list the root dir of an FTP server?
3.18 Can I use curl to send a POST/PUT and not wait for a response?
3.19 How do I get HTTP from a host using a specific IP address?
3.20 How to SFTP from my user's home directory?
3.21 Protocol xxx not supported or disabled in libcurl
3.22 curl -X gives me HTTP problems
4. Running Problems
4.1 Problems connecting to SSL servers.
@ -77,9 +71,6 @@ FAQ
4.14 Redirects work in browser but not with curl!
4.15 FTPS doesn't work
4.16 My HTTP POST or PUT requests are slow!
4.17 Non-functional connect timeouts on Windows
4.18 file:// URLs containing drive letters (Windows, NetWare)
4.19 Why doesn't cURL return an error when the network cable is unplugged?
5. libcurl Issues
5.1 Is libcurl thread-safe?
@ -89,16 +80,13 @@ FAQ
5.5 Does CURLOPT_WRITEDATA and CURLOPT_READDATA work on win32 ?
5.6 What about Keep-Alive or persistent connections?
5.7 Link errors when building libcurl on Windows!
5.8 libcurl.so.X: open failed: No such file or directory
5.8 libcurl.so.3: open failed: No such file or directory
5.9 How does libcurl resolve host names?
5.10 How do I prevent libcurl from writing the response to stdout?
5.11 How do I make libcurl not receive the whole HTTP response?
5.12 Can I make libcurl fake or hide my real IP address?
5.13 How do I stop an ongoing transfer?
5.14 Using C++ non-static functions for callbacks?
5.15 How do I get an FTP directory listing?
5.16 I want a different time-out!
5.17 Can I write a server with libcurl?
6. License Issues
6.1 I have a GPL program, can I use the libcurl library?
@ -107,11 +95,11 @@ FAQ
6.4 I have a program that uses LGPL libraries, can I use libcurl?
6.5 Can I modify curl/libcurl for my program and keep the changes secret?
6.6 Can you please change the curl/libcurl license to XXXX?
6.7 What are my obligations when using libcurl in my commercial apps?
6.7 What are my obligations when using libcurl in my commerical apps?
7. PHP/CURL Issues
7.1 What is PHP/CURL?
7.2 Who wrote PHP/CURL?
7.2 Who write PHP/CURL?
7.3 Can I perform multiple requests using the same handle?
==============================================================================
@ -130,19 +118,16 @@ FAQ
libcurl
A free and easy-to-use client-side URL transfer library, supporting DICT,
FILE, FTP, FTPS, GOPHER, HTTP, HTTPS, IMAP, IMAPS, LDAP, LDAPS, POP3,
POP3S, RTMP, RTSP, SCP, SFTP, SMTP, SMTPS, TELNET and TFTP.
libcurl supports HTTPS certificates, HTTP POST, HTTP PUT, FTP uploading,
kerberos, HTTP form based upload, proxies, cookies, user+password
authentication, file transfer resume, http proxy tunneling and more!
A free and easy-to-use client-side URL transfer library, supporting FTP,
FTPS, HTTP, HTTPS, SCP, SFTP, TFTP, TELNET, DICT, FILE and LDAP. libcurl
supports HTTPS certificates, HTTP POST, HTTP PUT, FTP uploading, kerberos,
HTTP form based upload, proxies, cookies, user+password authentication,
file transfer resume, http proxy tunneling and more!
libcurl is highly portable, it builds and works identically on numerous
platforms, including Solaris, NetBSD, FreeBSD, OpenBSD, Darwin, HPUX,
IRIX, AIX, Tru64, Linux, UnixWare, HURD, Windows, Amiga, OS/2, BeOS, Mac
OS X, Ultrix, QNX, OpenVMS, RISC OS, Novell NetWare, DOS, Symbian, OSF,
Android, Minix, IBM TPF and more...
IRIX, AIX, Tru64, Linux, UnixWare, HURD, Windows, Amiga, OS/2, BeOs, Mac
OS X, Ultrix, QNX, OpenVMS, RISC OS, Novell NetWare, DOS and more...
libcurl is free, thread-safe, IPv6 compatible, feature rich, well
supported and fast.
@ -151,8 +136,9 @@ FAQ
A command line tool for getting or sending files using URL syntax.
Since curl uses libcurl, curl supports the same wide range of common
Internet protocols that libcurl does.
Since curl uses libcurl, it supports a range of common Internet protocols,
currently including HTTP, HTTPS, FTP, FTPS, SCP, SFTP, TFTP, LDAP, DICT,
TELNET and FILE.
We pronounce curl and cURL with an initial k sound: [kurl].
@ -239,16 +225,13 @@ FAQ
1.6 What do you get for making curl?
Project cURL is entirely free and open. No person gets paid for developing
(lib)curl on full or even part time. We do this voluntarily on our spare
time. Occasionally companies pay individual developers to work on curl, but
that's up to each company and developer. It is not controlled by nor
supervised in any way by the project.
(lib)curl. We do this voluntarily on our spare time.
We still get help from companies. Haxx provides web site, bandwidth, mailing
lists etc and sourceforge.net hosts project services we take advantage from,
like the bug tracker. Also again, some companies have sponsored certain
parts of the development in the past and I hope some will continue to do so
in the future.
We get some help from companies. Contactor Data hosts the curl web site,
Haxx owns the curl web site's domain and sourceforge.net hosts project
services we take advantage from, like the bug tracker. Also, some companies
have sponsored certain parts of the development in the past and I hope some
will continue to do so in the future.
If you want to support our project, consider a donation or a banner-program
or even better: by helping us coding, documenting, testing etc.
@ -308,10 +291,34 @@ FAQ
We don't know how many users that downloaded or installed curl and then
never use it.
In May 2012 Daniel did a counting game and came up with a number that may
be completely wrong or somewhat accurate. 300 million!
Some facts to use as input to the math:
See http://daniel.haxx.se/blog/2012/05/16/300m-users/
curl packages are downloaded from the curl.haxx.se and mirrors over a
million times per year. curl is installed by default with most Linux
distributions. curl is installed by default with Mac OS X. curl and libcurl
as used by numerous applications that include libcurl binaries in their
distribution packages (like Adobe Acrobat Reader and Google Earth).
More than 80 known named companies use curl in commercial environments and
products. More than 100 known named open source projects depend on
(lib)curl.
In a poll on the curl web site mid-2005, more than 50% of the 300+ answers
estimated a user base of one million users or more.
In March 2005, the "Linux Counter project" estimated a total Linux user base
of some 29 millions, while Netcraft detected some 4 million "active" Linux
based web servers. A guess is that a fair amount of these Linux
installations have curl installed.
All this taken together, there is no doubt that there are millions of
(lib)curl users.
http://curl.haxx.se/docs/companies.html
http://curl.haxx.se/docs/programs.html
http://curl.haxx.se/libcurl/using/apps.html
http://counter.li.org/estimates.php
http://news.netcraft.com/archives/2005/03/14/fedora_makes_rapid_progress.html
1.11 Why don't you update ca-bundle.crt
@ -336,47 +343,6 @@ FAQ
Firefox (by running 'make ca-bundle'), or by using our online service setup
for this purpose: http://curl.haxx.se/docs/caextract.html
1.12 I have a problem who can I chat with?
There's a bunch of friendly people hanging out in the #curl channel on the
IRC network irc.freenode.net. If you're polite and nice, chances are big
that you can get -- or provide -- help instantly.
1.13 curl's ECCN number?
The US government restricts exports of software that contains or uses
cryptography. When doing so, the Export Control Classification Number (ECCN)
is used to identify the level of export control etc.
ASF gives a good explanation at http://www.apache.org/dev/crypto.html
We believe curl's number might be ECCN 5D002, another possibility is
5D992. It seems necessary to write them, asking to confirm.
Comprehensible explanations of the meaning of such numbers and how to
obtain them (resp.) are here
http://www.bis.doc.gov/licensing/exportingbasics.htm
http://www.bis.doc.gov/licensing/do_i_needaneccn.html
An incomprehensible description of the two numbers above is here
http://www.access.gpo.gov/bis/ear/pdf/ccl5-pt2.pdf
1.14 How do I submit my patch?
When you have made a patch or a change of whatever sort, and want to submit
that to the project, there are a few different ways we prefer:
o send a patch to the curl-library mailing list. We're many subscribers
there and there are lots of people who can review patches, comment on them
and "receive" them properly.
o if your patch changes or fixes a bug, you can also opt to submit a bug
report in the bug tracker and attach your patch there. There are less
people involved there.
Lots of more details are found in the CONTRIBUTE and INTERNALS docs.
2. Install Related Problems
@ -419,15 +385,10 @@ FAQ
2.2 Does curl work/build with other SSL libraries?
Curl has been written to use a generic SSL function layer internally, and
that SSL functionality can then be provided by one out of many different SSL
backends.
curl can be built to use one of the following SSL alternatives: OpenSSL,
GnuTLS, yassl, NSS, PolarSSL, axTLS, Secure Transport (native iOS/OS X),
schannel (native Windows) or qssl (native IBM i). They all have their pros
and cons, and we try to maintain a comparison of them here:
http://curl.haxx.se/docs/ssl-compared.html
Curl has been written to use OpenSSL, GnuTLS, yassl or NSS, although there
should not be many problems using a different library. If anyone does "port"
curl to use a different SSL library, we are of course very interested in
getting the patch!
2.3 Where can I find a copy of LIBEAY32.DLL?
@ -438,7 +399,7 @@ FAQ
accurate and up-to-date pointers to recent OpenSSL DLLs and other binary
packages.
2.4 Does curl support SOCKS (RFC 1928) ?
2.4 Does curl support Socks (RFC 1928) ?
Yes, SOCKS 4 and 5 are supported.
@ -448,25 +409,22 @@ FAQ
3.1 curl: (1) SSL is disabled, https: not supported
If you get this output when trying to get anything from a https:// server,
it means that the instance of curl/libcurl that you're using was built
without support for this protocol.
This could've happened if the configure script that was run at build time
couldn't find all libs and include files curl requires for SSL to work. If
the configure script fails to find them, curl is simply built without SSL
support.
it means that the configure script couldn't find all libs and include files
it requires for SSL to work. If the configure script fails to find them,
curl is simply built without SSL support.
To get the https:// support into a curl that was previously built but that
reports that https:// is not supported, you should dig through the document
and logs and check out why the configure script doesn't find the SSL libs
and/or include files.
Also, check out the other paragraph in this FAQ labelled "configure doesn't
Also, check out the other paragraph in this FAQ labeled "configure doesn't
find OpenSSL even when it is installed".
3.2 How do I tell curl to resume a transfer?
Curl supports resumed transfers both ways on both FTP and HTTP.
Try the -C option.
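For libcurl-based programs, resuming maps to the CURLOPT_RESUME_FROM (or
CURLOPT_RESUME_FROM_LARGE) option. A minimal sketch, where the URL and the
already-downloaded offset are made-up example values:

  #include <stdio.h>
  #include <curl/curl.h>

  int main(void)
  {
    CURL *curl;
    CURLcode res;
    curl_off_t resume_at = 1048576; /* assume the first 1 MB is already saved */

    curl_global_init(CURL_GLOBAL_DEFAULT);
    curl = curl_easy_init();
    if(curl) {
      curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/big.iso");
      /* continue the transfer at the given offset instead of from scratch */
      curl_easy_setopt(curl, CURLOPT_RESUME_FROM_LARGE, resume_at);
      res = curl_easy_perform(curl);
      if(res != CURLE_OK)
        fprintf(stderr, "transfer failed: %s\n", curl_easy_strerror(res));
      curl_easy_cleanup(curl);
    }
    curl_global_cleanup();
    return 0;
  }

A real program would also open the partially downloaded file in append mode
and point CURLOPT_WRITEDATA at it; here the data simply goes to stdout.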
3.3 Why doesn't my posting using -F work?
@ -488,16 +446,15 @@ FAQ
You can tell curl to perform optional commands both before and/or after a
file transfer. Study the -Q/--quote option.
Since curl is used for file transfers, you don't normally use curl to
perform FTP commands without transferring anything. Therefore you must
always specify a URL to transfer to/from even when doing custom FTP
commands, or use -I which implies the "no body" option sent to libcurl.
Since curl is used for file transfers, you don't use curl to just perform
FTP commands without transferring anything. Therefore you must always specify
a URL to transfer to/from even when doing custom FTP commands.
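In libcurl the -Q/--quote behaviour is available through CURLOPT_QUOTE
(commands sent before the transfer) and CURLOPT_POSTQUOTE (commands sent
after it). A minimal sketch, with a made-up server and file names:

  #include <curl/curl.h>

  int main(void)
  {
    CURL *curl;
    struct curl_slist *cmds = NULL;

    curl_global_init(CURL_GLOBAL_DEFAULT);
    curl = curl_easy_init();
    if(curl) {
      /* raw FTP commands sent on the control connection before the fetch */
      cmds = curl_slist_append(cmds, "RNFR oldname.txt");
      cmds = curl_slist_append(cmds, "RNTO newname.txt");
      curl_easy_setopt(curl, CURLOPT_URL, "ftp://ftp.example.com/newname.txt");
      curl_easy_setopt(curl, CURLOPT_QUOTE, cmds);
      curl_easy_perform(curl);
      curl_slist_free_all(cmds);
      curl_easy_cleanup(curl);
    }
    curl_global_cleanup();
    return 0;
  }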
3.5 How can I disable the Accept: */* header?
3.5 How can I disable the Pragma: nocache header?
You can change all internally generated headers by adding a replacement with
the -H/--header option. By adding a header with empty contents you safely
disable that one. Use -H "Accept:" to disable that specific header.
disable that one. Use -H "Pragma:" to disable that specific header.
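The libcurl counterpart is CURLOPT_HTTPHEADER: passing a header name with
nothing after the colon removes the internally generated header of that name.
A minimal sketch (URL made up for illustration):

  #include <curl/curl.h>

  int main(void)
  {
    CURL *curl = curl_easy_init();
    struct curl_slist *hdrs = NULL;

    if(curl) {
      /* an empty value after the colon suppresses the internal header */
      hdrs = curl_slist_append(hdrs, "Accept:");
      curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/");
      curl_easy_setopt(curl, CURLOPT_HTTPHEADER, hdrs);
      curl_easy_perform(curl);
      curl_slist_free_all(hdrs);
      curl_easy_cleanup(curl);
    }
    return 0;
  }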
3.6 Does curl support ASP, XML, XHTML or HTML version Y?
@ -541,19 +498,12 @@ FAQ
install and use them, in the libcurl section of the curl web site:
http://curl.haxx.se/libcurl/
All the various bindings to libcurl are made by other projects and people,
outside of the cURL project. The cURL project itself only produces libcurl
with its plain C API. If you don't find anywhere else to ask you can ask
about bindings on the curl-library list too, but be prepared that people on
that list may not know anything about bindings.
In October 2009, there were interfaces available for the following
languages: Ada95, Basic, C, C++, Ch, Cocoa, D, Dylan, Eiffel, Euphoria,
Ferite, Gambas, glib/GTK+, Haskell, ILE/RPG, Java, Lisp, Lua, Mono, .NET,
Object-Pascal, O'Caml, Pascal, Perl, PHP, PostgreSQL, Python, R, Rexx, Ruby,
Scheme, S-Lang, Smalltalk, SP-Forth, SPL, Tcl, Visual Basic, Visual FoxPro,
Q, wxwidgets and XBLite. By the time you read this, additional ones may have
appeared!
In February 2007, there are interfaces available for the following
languages: Ada95, Basic, C, C++, Ch, Cocoa, D, Dylan, Euphoria, Ferite,
Gambas, glib/GTK+, Java, Lisp, Lua, Mono, .NET, Object-Pascal, O'Caml,
Pascal, Perl, PHP, PostgreSQL, Python, R, Rexx, Ruby, Scheme, S-Lang,
Smalltalk, SPL, Tcl, Visual Basic, Q, wxwidgets and XBLite. By the time you
read this, additional ones may have appeared!
3.10 What about SOAP, WebDAV, XML-RPC or similar protocols over HTTP?
@ -607,27 +557,27 @@ FAQ
Remember that curl works and runs on more operating systems than most single
individuals have ever tried.
3.14 Does curl support Javascript or PAC (automated proxy config)?
3.14 Does curl support javascript or pac (automated proxy config)?
Many web pages do magic stuff using embedded Javascript. Curl and libcurl
Many web pages do magic stuff using embedded javascript. Curl and libcurl
have no built-in support for that, so it will be treated just like any other
contents.
.pac files are a netscape invention and are sometimes used by organizations
to allow them to differentiate which proxies to use. The .pac contents is
just a Javascript program that gets invoked by the browser and that returns
the name of the proxy to connect to. Since curl doesn't support Javascript,
just a javascript program that gets invoked by the browser and that returns
the name of the proxy to connect to. Since curl doesn't support javascript,
it can't support .pac proxy configuration either.
Some workarounds usually suggested to overcome this Javascript dependency:
Some work-arounds usually suggested to overcome this javascript dependency:
- Depending on the Javascript complexity, write up a script that
- Depending on the javascript complexity, write up a script that
translates it to another language and execute that.
- Read the Javascript code and rewrite the same logic in another language.
- Read the javascript code and rewrite the same logic in another language.
- Implement a Javascript interpreter, people have successfully used the
Mozilla Javascript engine in the past.
- Implement a javascript interpreter, people have successfully used the
Mozilla javascript engine in the past.
- Ask your admins to stop this, for a static proxy setup or similar.
@ -649,9 +599,6 @@ FAQ
provide this in order to prove that you actually are who you claim to be.
If the server doesn't require this, you don't need a client certificate.
A client certificate is always used together with a private key, and the
private key has a pass phrase that protects it.
- Server certificate. The server you communicate with has a server
certificate. You can and should verify this certificate to make sure that
you are truly talking to the real server and not a server impersonating
@ -659,9 +606,8 @@ FAQ
- Certificate Authority certificate ("CA cert"). You often have several CA
certs in a CA cert bundle that can be used to verify a server certificate
that was signed by one of the authorities in the bundle. curl does not
come with a CA cert bundle but most curl installs provide one. You can
also override the default.
that was signed by one of the authorities in the bundle. curl comes with a
default CA cert bundle. You can override the default.
The server certificate verification process is made by using a Certificate
Authority certificate ("CA cert") that was used to sign the server
@ -687,84 +633,11 @@ FAQ
curl ftp://ftp.sunet.se//tmp/
3.18 Can I use curl to send a POST/PUT and not wait for a response?
No.
But you could easily write your own program using libcurl to do such stunts.
3.19 How do I get HTTP from a host using a specific IP address?
For example, you may be trying out a web site installation that isn't yet in
the DNS. Or you have a site using multiple IP addresses for a given host
name and you want to address a specific one out of the set.
Set a custom Host: header that identifies the server name you want to reach
but use the target IP address in the URL:
curl --header "Host: www.example.com" http://127.0.0.1/
You can also opt to add faked host name entries to curl with the --resolve
option. That has the added benefit that things like redirects will also work
properly. The above operation would instead be done as:
curl --resolve www.example.com:80:127.0.0.1 http://www.example.com/
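The --resolve trick is also available to libcurl applications through the
CURLOPT_RESOLVE option, which takes a list of HOST:PORT:ADDRESS entries that
pre-populate the DNS cache. A minimal sketch with made-up names:

  #include <curl/curl.h>

  int main(void)
  {
    CURL *curl = curl_easy_init();
    struct curl_slist *host = NULL;

    if(curl) {
      /* make www.example.com:80 resolve to 127.0.0.1 for this handle */
      host = curl_slist_append(host, "www.example.com:80:127.0.0.1");
      curl_easy_setopt(curl, CURLOPT_RESOLVE, host);
      curl_easy_setopt(curl, CURLOPT_URL, "http://www.example.com/");
      curl_easy_perform(curl);
      curl_slist_free_all(host);
      curl_easy_cleanup(curl);
    }
    return 0;
  }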
3.20 How to SFTP from my user's home directory?
Contrary to how FTP works, SFTP and SCP URLs specify the exact directory to
work with. It means that if you don't specify that you want the user's home
directory, you get the actual root directory.
To specify a file in your user's home directory, you need to use the correct
URL syntax which for sftp might look similar to:
curl -O -u user:password sftp://example.com/~/file.txt
and for SCP it is just a different protocol prefix:
curl -O -u user:password scp://example.com/~/file.txt
3.21 Protocol xxx not supported or disabled in libcurl
When passing on a URL to curl to use, it may respond that the particular
protocol is not supported or disabled. The particular way this error message
is phrased is because curl doesn't make a distinction internally of whether
a particular protocol is not supported (ie never got any code added that
knows how to speak that protocol) or if it was explicitly disabled. curl can
be built to only support a given set of protocols, and the rest would then
be disabled or not supported.
Note that this error will also occur if you pass a wrongly spelled protocol
part as in "htpt://example.com" or as in the less evident case if you prefix
the protocol part with a space as in " http://example.com/".
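A program can check at run-time which protocols its libcurl build supports by
looking at curl_version_info(); its protocols member is a NULL-terminated list
of protocol names. A minimal sketch:

  #include <stdio.h>
  #include <curl/curl.h>

  int main(void)
  {
    const curl_version_info_data *info = curl_version_info(CURLVERSION_NOW);
    const char *const *p;

    printf("libcurl %s supports:", info->version);
    for(p = info->protocols; *p; p++)
      printf(" %s", *p);
    printf("\n");
    return 0;
  }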
3.22 curl -X gives me HTTP problems
In normal circumstances, -X should hardly ever be used.
By default you use curl without explicitly saying which request method to
use when the URL identifies a HTTP transfer. If you just pass in a URL like
"curl http://example.com" it will use GET. If you use -d or -F curl will use
POST, -I will cause a HEAD and -T will make it a PUT.
If for whatever reason you're not happy with these default choices that curl
does for you, you can override those request methods by specifying -X
[WHATEVER]. This way you can for example send a DELETE by doing "curl -X
DELETE [URL]".
It is thus pointless to do "curl -XGET [URL]" as GET would be used
anyway. In the same vein it is pointless to do "curl -X POST -d data
[URL]"... But you can make a fun and somewhat rare request that sends a
request-body in a GET request with something like "curl -X GET -d data
[URL]"
Note that -X doesn't change curl's behavior. It only modifies the actual
string sent in the request.
Accordingly, by using -XPOST on a command line that for example would follow
a 303 redirect, you will effectively prevent curl from behaving
correctly. Be aware.
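In libcurl the same override is CURLOPT_CUSTOMREQUEST, and the same caveat
applies: it only changes the method string that is sent, not how libcurl
behaves. A minimal sketch sending a DELETE (URL made up):

  #include <curl/curl.h>

  int main(void)
  {
    CURL *curl = curl_easy_init();
    if(curl) {
      curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/resource/123");
      /* changes the request line only; libcurl still acts as for a GET */
      curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "DELETE");
      curl_easy_perform(curl);
      curl_easy_cleanup(curl);
    }
    return 0;
  }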
4. Running Problems
@ -785,22 +658,21 @@ FAQ
4.2 Why do I get problems when I use & or % in the URL?
In general unix shells, the & symbol is treated specially and when used, it
In general unix shells, the & letter is treated special and when used, it
runs the specified command in the background. To safely send the & as a part
of a URL, you should quote the entire URL by using single (') or double (")
quotes around it. Similar problems can also occur on some shells with other
characters, including ?*!$~(){}<>\|;`. When in doubt, quote the URL.
quotes around it.
An example that would invoke a remote CGI that uses &-symbols could be:
An example that would invoke a remote CGI that uses &-letters could be:
curl 'http://www.altavista.com/cgi-bin/query?text=yes&q=curl'
In Windows, the standard DOS shell treats the %-symbol specially and you
need to use TWO %-symbols for each single one you want to use in the URL.
In Windows, the standard DOS shell treats the %-letter specially and you
need to use TWO %-letters for each single one you want to use in the URL.
Also note that if you want the literal %-symbol to be part of the data you
Also note that if you want the literal %-letter to be part of the data you
pass in a POST using -d/--data you must encode it as '%25' (which then also
needs the %-symbol doubled on Windows machines).
needs the %-letter doubled on Windows machines).
4.3 How can I use {, }, [ or ] to specify multiple URLs?
@ -842,7 +714,7 @@ FAQ
4.5.3 "403 Forbidden"
The server understood the request, but is refusing to fulfil it.
The server understood the request, but is refusing to fulfill it.
Authorization will not help and the request SHOULD NOT be repeated.
4.5.4 "404 Not Found"
@ -913,8 +785,10 @@ FAQ
4.9 Curl can't authenticate to the server that requires NTLM?
NTLM support requires OpenSSL, GnuTLS, NSS, Secure Transport, or Microsoft
Windows libraries at build-time to provide this functionality.
This is supported in curl 7.10.6 or later. No earlier curl version knows
of this magic. Later versions require the OpenSSL or Microsoft Windows
libraries to provide this functionality. Using GnuTLS or NSS libraries will
not provide NTLM authentication functionality in curl.
NTLM is a Microsoft proprietary protocol. Proprietary formats are evil. You
should not use such ones.
@ -974,7 +848,7 @@ FAQ
- Meta tags. You can write a HTML tag that will cause the browser to
redirect to another given URL after a certain time.
- Javascript. You can write a Javascript program embedded in a HTML page
- Javascript. You can write a javascript program embeded in a HTML page
that redirects the browser to another given URL.
There is no way to make curl follow these redirects. You must either
@ -1010,75 +884,6 @@ FAQ
You can disable libcurl's use of the Expect: header the same way you disable
any header, using -H / CURLOPT_HTTPHEADER, or by forcing it to use HTTP 1.0.
4.17 Non-functional connect timeouts
In most Windows setups having a timeout longer than 21 seconds makes no
difference, as Windows will only send 3 TCP SYN packets and no more. The
second packet is sent three seconds after the first and the third six seconds
after the second. No more than three packets are sent, no matter how long the
timeout is set.
See option TcpMaxConnectRetransmissions on this page:
http://support.microsoft.com/?scid=kb%3Ben-us%3B175523&x=6&y=7
Also, even on non-Windows systems there may run a firewall or anti-virus
software or similar that accepts the connection but does not actually do
anything else. This will make (lib)curl to consider the connection connected
and thus the connect timeout won't trigger.
4.18 file:// URLs containing drive letters (Windows, NetWare)
When using cURL to try to download a local file, one might use a URL
in this format:
file://D:/blah.txt
You'll find that even if D:\blah.txt does exist, cURL returns a 'file
not found' error.
According to RFC 1738 (http://www.faqs.org/rfcs/rfc1738.html),
file:// URLs must contain a host component, but it is ignored by
most implementations. In the above example, 'D:' is treated as the
host component, and is taken away. Thus, cURL tries to open '/blah.txt'.
If your system is installed to drive C:, that will resolve to 'C:\blah.txt',
and if that doesn't exist you will get the not found error.
To fix this problem, use file:// URLs with *three* leading slashes:
file:///D:/blah.txt
Alternatively, if it makes more sense, specify 'localhost' as the host
component:
file://localhost/D:/blah.txt
In either case, cURL should now be looking for the correct file.
4.19 Why doesn't cURL return an error when the network cable is unplugged?
Unplugging the cable is not an error situation. The TCP/IP protocol stack
was designed to be fault tolerant, so even though there may be a physical
break somewhere the connection shouldn't be affected, just possibly
delayed. Eventually, the physical break will be fixed or the data will be
re-routed around the physical problem.
In such cases, the TCP/IP stack is responsible for detecting when the
network connection is irrevocably lost. Since with some protocols it is
perfectly legal for the client to wait indefinitely for data, the stack may
never report a problem, and even when it does, it can take up to 20 minutes
for it to detect an issue. The curl option --keepalive-time enables
keep-alive support in the TCP/IP stack which makes it periodically probe the
connection to make sure it is still available to send data. That should
reliably detect any TCP/IP network failure.
But even that won't detect the network going down before the TCP/IP
connection is established (e.g. during a DNS lookup) or using protocols that
don't use TCP. To handle those situations, curl offers a number of timeouts
on its own. --speed-limit/--speed-time will abort if the data transfer rate
falls too low, and --connect-timeout and --max-time can be used to put an
overall timeout on the connection phase or the entire transfer.
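For libcurl applications the corresponding options are CURLOPT_TCP_KEEPALIVE
(with CURLOPT_TCP_KEEPIDLE and CURLOPT_TCP_KEEPINTVL, available in libcurl
7.25.0 and later), CURLOPT_CONNECTTIMEOUT and CURLOPT_TIMEOUT. A minimal
sketch with arbitrary example values:

  #include <curl/curl.h>

  int main(void)
  {
    CURL *curl = curl_easy_init();
    if(curl) {
      curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/");
      /* probe the connection with TCP keep-alive every 60 seconds */
      curl_easy_setopt(curl, CURLOPT_TCP_KEEPALIVE, 1L);
      curl_easy_setopt(curl, CURLOPT_TCP_KEEPIDLE, 60L);
      curl_easy_setopt(curl, CURLOPT_TCP_KEEPINTVL, 60L);
      /* cap the connect phase and the whole operation */
      curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT, 10L);
      curl_easy_setopt(curl, CURLOPT_TIMEOUT, 300L);
      curl_easy_perform(curl);
      curl_easy_cleanup(curl);
    }
    return 0;
  }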
5. libcurl Issues
5.1 Is libcurl thread-safe?
@ -1092,7 +897,7 @@ FAQ
If you use a OpenSSL-powered libcurl in a multi-threaded environment, you
need to provide one or two locking functions:
http://www.openssl.org/docs/crypto/threads.html
http://www.openssl.org/docs/crypto/threads.html#DESCRIPTION
If you use a GnuTLS-powered libcurl in a multi-threaded environment, you
need to provide locking function(s) for libgcrypt (which is used by GnuTLS
@ -1167,11 +972,6 @@ FAQ
libcurl will reuse connections for all transfers that are made using the
same libcurl handle.
When you use the easy interface, the connection cache is kept within the
easy handle. If you instead use the multi interface, the connection cache
will be kept within the multi handle and will be shared among all the easy
handles that are used within the same multi handle.
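A minimal sketch of easy-interface reuse: performing two requests on the same
easy handle lets the second one reuse the connection opened by the first,
provided the server allows keep-alive (host name made up):

  #include <curl/curl.h>

  int main(void)
  {
    CURL *curl = curl_easy_init();
    if(curl) {
      curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/first");
      curl_easy_perform(curl);      /* opens a connection */

      curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/second");
      curl_easy_perform(curl);      /* reuses the cached connection */

      curl_easy_cleanup(curl);      /* closes any cached connections */
    }
    return 0;
  }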
5.7 Link errors when building libcurl on Windows!
You need to make sure that your project, and all the libraries (both static
@ -1184,30 +984,27 @@ FAQ
When building an application that uses the static libcurl library, you must
add -DCURL_STATICLIB to your CFLAGS. Otherwise the linker will look for
dynamic import symbols. If you're using Visual Studio, you need to instead
add CURL_STATICLIB in the "Preprocessor Definitions" section.
dynamic import symbols. If you get linker error like "unknown symbol
__imp__curl_easy_init ..." you have linked against the wrong (static)
library. If you want to use the libcurl.dll and import lib, you don't need
any extra CFLAGS, but use one of the import libraries below. These are the
libraries produced by the various lib/Makefile.* files:
If you get linker error like "unknown symbol __imp__curl_easy_init ..." you
have linked against the wrong (static) library. If you want to use the
libcurl.dll and import lib, you don't need any extra CFLAGS, but use one of
the import libraries below. These are the libraries produced by the various
lib/Makefile.* files:
Target: static lib. import lib for libcurl*.dll.
-----------------------------------------------------------
MingW: libcurl.a libcurldll.a
MSVC (release): libcurl.lib libcurl_imp.lib
MSVC (debug): libcurld.lib libcurld_imp.lib
Borland: libcurl.lib libcurl_imp.lib
Target: static lib. import lib for libcurl*.dll.
-----------------------------------------------------------
MingW: libcurl.a libcurldll.a
MSVC (release): libcurl.lib libcurl_imp.lib
MSVC (debug): libcurld.lib libcurld_imp.lib
Borland: libcurl.lib libcurl_imp.lib
5.8 libcurl.so.X: open failed: No such file or directory
5.8 libcurl.so.3: open failed: No such file or directory
This is an error message you might get when you try to run a program linked
with a shared version of libcurl and your run-time linker (ld.so) couldn't
find the shared library named libcurl.so.X. (Where X is the number of the
current libcurl ABI, typically 3 or 4).
find the shared library named libcurl.so.3.
You need to make sure that ld.so finds libcurl.so.X. You can do that
You need to make sure that ld.so finds libcurl.so.3. You can do that
multiple ways, and it differs somewhat between different operating systems,
but they are usually:
@ -1232,20 +1029,21 @@ FAQ
- The non-ipv6 resolver that can use one out of four host name resolve calls
(depending on what your system supports):
A - gethostbyname()
B - gethostbyname_r() with 3 arguments
C - gethostbyname_r() with 5 arguments
D - gethostbyname_r() with 6 arguments
A - gethostbyname()
B - gethostbyname_r() with 3 arguments
C - gethostbyname_r() with 5 arguments
D - gethostbyname_r() with 6 arguments
- The ipv6-resolver that uses getaddrinfo()
- The c-ares based name resolver that uses the c-ares library for resolves.
Using this offers asynchronous name resolves.
Using this offers asynchronous name resolves but it currently has no IPv6
support.
- The threaded resolver (default option on Windows). It uses:
- The Windows threaded resolver. It use:
A - gethostbyname() on plain ipv4 hosts
B - getaddrinfo() on ipv6-enabled hosts
A - gethostbyname() on plain ipv4 windows hosts
B - getaddrinfo() on ipv6-enabled windows hosts
Also note that libcurl never resolves or reverse-lookups addresses given as
pure numbers, such as 127.0.0.1 or ::1.
@ -1277,17 +1075,16 @@ FAQ
5.13 How do I stop an ongoing transfer?
With the easy interface you make sure to return the correct error code from
one of the callbacks, but none of them are instant. There is no function you
can call from another thread or similar that will stop it immediately.
Instead, you need to make sure that one of the callbacks you use returns an
appropriate value that will stop the transfer. Suitable callbacks that you
can do this with include the progress callback, the read callback and the
write callback.
There are several ways, but none of them are instant. There is no function
you can call from another thread or similar that will stop it immediately.
Instead you need to make sure that one of the callbacks you use return an
appropriate value that will stop the transfer.
If you're using the multi interface, you can also stop a transfer by
removing the particular easy handle from the multi stack at any moment you
think the transfer is done or when you wish to abort the transfer.
Suitable callbacks that you can do this with include the progress callback,
the read callback and the write callback.
If you're using the multi interface, you also stop a transfer by removing
the particular easy handle from the multi stack.
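A minimal sketch of aborting from the progress callback: returning a non-zero
value from it makes curl_easy_perform() stop and return
CURLE_ABORTED_BY_CALLBACK. The abort flag here is just an example of an
application-controlled condition:

  #include <stdio.h>
  #include <curl/curl.h>

  /* returning non-zero from this callback aborts the transfer */
  static int progress_cb(void *clientp, double dltotal, double dlnow,
                         double ultotal, double ulnow)
  {
    int *stop = (int *)clientp;
    (void)dltotal; (void)dlnow; (void)ultotal; (void)ulnow;
    return *stop ? 1 : 0;
  }

  int main(void)
  {
    CURL *curl = curl_easy_init();
    int stop = 0;   /* set to 1 from elsewhere in the program to abort */

    if(curl) {
      curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/large.bin");
      curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0L);
      curl_easy_setopt(curl, CURLOPT_PROGRESSFUNCTION, progress_cb);
      curl_easy_setopt(curl, CURLOPT_PROGRESSDATA, &stop);
      if(curl_easy_perform(curl) == CURLE_ABORTED_BY_CALLBACK)
        fprintf(stderr, "transfer was aborted\n");
      curl_easy_cleanup(curl);
    }
    return 0;
  }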
5.14 Using C++ non-static functions for callbacks?
@ -1297,64 +1094,16 @@ FAQ
member function that is passed a pointer to the class:
// f is the pointer to your object.
static size_t YourClass::func(void *buffer, size_t sz, size_t n, void *f)
static size_t YourClass::staticFunction(void *buffer, size_t sz, size_t n, void *f)
{
// Call non-static member function.
static_cast<YourClass*>(f)->nonStaticFunction();
// A write callback must return the number of bytes it handled.
return sz * n;
}
// This is how you pass pointer to the static function:
curl_easy_setopt(hcurl, CURLOPT_WRITEFUNCTION, YourClass::func);
curl_easy_setopt(hcurl, CURLOPT_WRITEFUNCTION, YourClass::staticFunction);
curl_easy_setopt(hcurl, CURLOPT_WRITEDATA, this);
5.15 How do I get an FTP directory listing?
If you end the FTP URL you request with a slash, libcurl will provide you
with a directory listing of that given directory. You can also set
CURLOPT_CUSTOMREQUEST to alter what exact listing command libcurl would use
to list the files.
The follow-up question that tends to follow the previous one is how a
program is supposed to parse the directory listing. How does it know what's
a file and what's a dir and what's a symlink etc. The harsh reality is that
FTP provides no such fine and easy-to-parse output. The output format FTP
servers use for LIST responses is entirely at the server's own liking, and
the NLST output doesn't reveal any types and in many cases doesn't even
include all the directory entries. Also, both LIST and NLST tend to hide
unix-style hidden files (those that start with a dot) by default so you need
to do "LIST -a" or similar to see them.
The application thus needs to parse the LIST output. One such existing
list parser is available at http://cr.yp.to/ftpparse.html Versions of
libcurl since 7.21.0 also provide the ability to specify a wildcard to
download multiple files from one FTP directory.
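As a small sketch (the URL and handle name here are examples, not taken from
this FAQ), a trailing slash on the URL makes libcurl deliver the listing
through the regular write callback, and CURLOPT_CUSTOMREQUEST can swap in
another listing command such as NLST:

  CURL *curl = curl_easy_init();
  if(curl) {
    /* the trailing slash requests a directory listing */
    curl_easy_setopt(curl, CURLOPT_URL, "ftp://ftp.example.com/pub/");
    /* optional: ask for a plain name list instead of the default LIST */
    curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, "NLST");
    curl_easy_perform(curl);   /* listing arrives via the write callback */
    curl_easy_cleanup(curl);
  }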
5.16 I want a different time-out!
Time and time again users realize that CURLOPT_TIMEOUT and
CURLOPT_CONNECTTIMEOUT are not sufficiently advanced or flexible to cover all
the various use cases and scenarios applications end up with.
libcurl offers many more ways to time-out operations. A common alternative
is to use the CURLOPT_LOW_SPEED_LIMIT and CURLOPT_LOW_SPEED_TIME options to
specify the lowest speed to accept before considering the transfer
timed out.
The most flexible way is to write your own time-out logic, using
CURLOPT_PROGRESSFUNCTION (perhaps in combination with other callbacks) to
figure out exactly when the right condition is met and the transfer should
be stopped.
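For example (the numbers are arbitrary and the handle name "curl" is assumed,
so treat this only as a sketch), a transfer that stays below 100 bytes/second
for 30 seconds can be made to time out like this:

  /* abort if the transfer is slower than 100 bytes/sec for 30 seconds */
  curl_easy_setopt(curl, CURLOPT_LOW_SPEED_LIMIT, 100L);
  curl_easy_setopt(curl, CURLOPT_LOW_SPEED_TIME, 30L);

  /* curl_easy_perform() then returns CURLE_OPERATION_TIMEDOUT */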
5.17 Can I write a server with libcurl?
No. libcurl offers no functions or building blocks to build any kind of
internet protocol server. libcurl is only a client-side library. For server
libraries, you need to continue your search elsewhere but there exist many
good open source ones out there for most protocols you could possibly want a
server for. And there are really good stand-alone ones that have been tested
and proven for many years. There's no need for you to reinvent them!
6. License Issues
Curl and libcurl are released under an MIT/X derivative license. The license is
@ -1363,10 +1112,7 @@ FAQ
this section was much enhanced by Bjorn Reese.)
We are not lawyers and this is not legal advice. You should probably consult
one if you want true and accurate legal insights without our prejudice. Note
especially that this section concerns the libcurl license only; compiling in
features of libcurl that depend on other libraries (e.g. OpenSSL) may affect
the licensing obligations of your application.
one if you want true and accurate legal insights without our prejudice.
6.1 I have a GPL program, can I use the libcurl library?
@ -1412,7 +1158,7 @@ FAQ
libraries that use it. It should be possible for everyone to use libcurl or
curl in their projects, no matter what license they already have in use.
6.7 What are my obligations when using libcurl in my commercial apps?
6.7 What are my obligations when using libcurl in my commercial apps?
Next to none. All you need to adhere to is the MIT-style license (stated in
the COPYING file) which basically says you have to include the copyright
@ -1424,16 +1170,12 @@ FAQ
You do not have to reveal or make public any changes to the libcurl source
code.
You do not have to broadcast to the world that you are using libcurl within
You do not have to reveal or make public that you are using libcurl within
your app.
All we ask is that you disclose "the copyright notice and this permission
notice" somewhere. Most probably like in the documentation or in the section
where other third party dependencies already are mentioned and acknowledged.
As can be seen here: http://curl.haxx.se/docs/companies.html and elsewhere,
more and more companies are discovering the power of libcurl and take
advantage of it even in commercial environments.
As can be seen here: http://curl.haxx.se/docs/companies.html and
elsewhere, more and more companies are discovering the power
of libcurl and take advantage of it even in commercial environments.
7. PHP/CURL Issues
@ -1449,7 +1191,7 @@ FAQ
CURL (often using all caps) or sometimes ext/curl, but both cause much
confusion to users which in turn gives us a higher question load.
7.2 Who wrote PHP/CURL?
7.2 Who wrote PHP/CURL?
PHP/CURL is a module that comes with the regular PHP package. It depends on
and uses libcurl, so you need to have libcurl installed properly first before

View File

@ -1,7 +1,7 @@
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
| (__| |_| | _ <| |___
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
| (__| |_| | _ <| |___
\___|\___/|_| \_\_____|
FEATURES
@ -26,12 +26,12 @@ libcurl supports
- compiles on win32 (reported builds on 40+ operating systems)
- selectable network interface for outgoing traffic
- IPv6 support on unix and Windows
- persistent connections
- persistent connections
- socks5 support
- supports user name + password in proxy environment variables
- operations through proxy "tunnel" (using CONNECT)
- supports large files (>2GB and >4GB) both upload/download
- replaceable memory functions (malloc, free, realloc, etc)
- replaceable memory functions (malloc, free, realloc, etc)
- asynchronous name resolving (*6)
- both a push and a pull style interface
@ -43,7 +43,7 @@ HTTP
- POST
- Pipelining
- multipart formpost (RFC1867-style)
- authentication: Basic, Digest, NTLM (*9), GSS-Negotiate/Negotiate (*3) and
- authentication: Basic, Digest, NTLM(*1), GSS-Negotiate/Negotiate(*3) and
SPNEGO (*4) to server and proxy
- resume (both GET and PUT)
- follow redirects
@ -122,52 +122,10 @@ FILE
- "uploads"
- resume
SMTP
- authentication: Plain, Login, CRAM-MD5, Digest-MD5 and NTLM (*9)
- send mail
- mail from support
- mail size support
- mail auth support for trusted server-to-server relaying
- multiple recipients
- via http-proxy
SMTPS (*1)
- implicit smtps:// support
- explicit "STARTTLS" usage to "upgrade" plain smtp:// connections to use SSL
- via http-proxy
POP3
- authentication: Clear Text, APOP and SASL
- SASL based authentication: Plain, Login, CRAM-MD5, Digest-MD5 and
NTLM (*9)
- list e-mails
- retrieve e-mails
- enhanced command support for: CAPA, DELE, TOP, STAT, UIDL and NOOP
- via http-proxy
POP3S (*1)
- implicit pop3s:// support
- explicit "STLS" usage to "upgrade" plain pop3:// connections to use SSL
- via http-proxy
IMAP
- authentication: Clear Text and SASL
- select mailbox
- basic fetch e-mail support
- SASL based authentication: Plain, Login, CRAM-MD5, Digest-MD5 and
NTLM (*9)
- via http-proxy
IMAPS (*1)
- implicit imaps:// support
- explicit "STARTTLS" usage to "upgrade" plain imap:// connections to use SSL
- via http-proxy
FOOTNOTES
=========
*1 = requires OpenSSL, GnuTLS, NSS, yassl, axTLS, PolarSSL, schannel (native
Windows), Secure Transport (native iOS/OS X) or qssl (native IBM i)
*1 = requires OpenSSL, GnuTLS, NSS or yassl
*2 = requires OpenLDAP
*3 = requires a GSSAPI-compliant library, such as Heimdal or similar.
*4 = requires FBopenssl
@ -175,4 +133,3 @@ FOOTNOTES
*6 = requires c-ares
*7 = requires OpenSSL or NSS, as GnuTLS only supports SSLv3 and TLSv1
*8 = requires libssh2
*9 = requires OpenSSL, GnuTLS, NSS, yassl or SSPI (native Windows)

View File

@ -77,7 +77,7 @@ different bindings exist at the time of this writing.
September 2000, kerberos4 support was added.
In November 2000 started the work on a test suite for curl. It was later
re-written from scratch again. The libcurl major SONAME number was set to 1.
re-written from scratch again.
January 2001, Daniel released curl 7.5.2 under a new license again: MIT (or
MPL). The MIT license is extremely liberal and can be used combined with GPL
@ -88,7 +88,7 @@ deemed "GPL incompatible".)
curl supports HTTP 1.1 starting with the release of 7.7, March 22 2001. This
also introduced libcurl's ability to do persistent connections. 24000 lines of
code. The libcurl major SONAME number was bumped to 2 due to this overhaul.
code.
The first experimental ftps:// support was added in March 2001.
@ -129,12 +129,7 @@ December 2003, full-fledged SSL for FTP is supported.
January 2004: curl 7.11.0 introduced large file support.
June 2004:
curl 7.12.0 introduced IDN support. 10 official web mirrors.
This release bumped the major SONAME to 3 due to the removal of the
curl_formparse() function
June 2004: curl 7.12.0 introduced IDN support. 10 official web mirrors.
August 2004:
Curl and libcurl 7.12.1
@ -149,96 +144,10 @@ August 2004:
April 2005:
GnuTLS can now optionally be used for the secure layer when curl is built.
GnuTLS can now optionally be used for the secure layer when curl is built.
September 2005:
TFTP support was added.
TFTP support was added.
More than 100,000 unique visitors of the curl web site. 25 mirrors.
December 2005:
security vulnerability: libcurl URL Buffer Overflow
January 2006:
We dropped support for Gopher. We found bugs in the implementation that
turned out to have been introduced years ago; since nobody had noticed them
in all that time, we removed the support instead of fixing it.
March 2006:
security vulnerability: libcurl TFTP Packet Buffer Overflow
April 2006:
Added the multi_socket() API
September 2006:
The major SONAME number for libcurl was bumped to 4 due to the removal of
ftp third party transfer support.
November 2006:
Added SCP and SFTP support
February 2007:
Added support for the Mozilla NSS library to do the SSL/TLS stuff
July 2007:
security vulnerability: libcurl GnuTLS insufficient cert verification
November 2008:
Command line options: 128
curl_easy_setopt() options: 158
Public functions in libcurl: 58
Known libcurl bindings: 37
Contributors: 683
145,000 unique visitors. >100 GB downloaded.
March 2009:
security vulnerability: libcurl Arbitrary File Access
August 2009:
security vulnerability: libcurl embedded zero in cert name
December 2009:
Added support for IMAP, POP3 and SMTP
January 2010:
Added support for RTSP
February 2010:
security vulnerability: libcurl data callback excessive length
March 2010:
The project switched over to use git instead of CVS for source code control
May 2010:
Added support for RTMP
Added support for PolarSSL to do the SSL/TLS stuff
August 2010:
Public curl releases: 117
Command line options: 138
curl_easy_setopt() options: 180
Public functions in libcurl: 58
Known libcurl bindings: 39
Contributors: 808
Gopher support added (re-added actually)
More than 100,000 unique visitors of the curl web site. 25 mirrors.

View File

@ -1,123 +0,0 @@
Updated: July 3, 2012 (http://curl.haxx.se/docs/http-cookies.html)
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
| (__| |_| | _ <| |___
\___|\___/|_| \_\_____|
HTTP Cookies
1. HTTP Cookies
1.1 Cookie overview
1.2 Cookies saved to disk
1.3 Cookies with curl the command line tool
1.4 Cookies with libcurl
1.5 Cookies with javascript
==============================================================================
1. HTTP Cookies
1.1 Cookie overview
HTTP cookies are pieces of 'name=contents' snippets that a server tells the
client to hold and then the client sends back to the server on subsequent
requests to the same domains/paths for which the cookies were set.
Cookies are either "session cookies", which typically are forgotten when
the session is over (often taken to mean when the browser quits), or
cookies with expiration dates after which the client will throw them away.
Cookies are set to the client with the Set-Cookie: header and are sent to
servers with the Cookie: header.
For a very long time, the only spec explaining how to use cookies was the
original Netscape spec from 1994: http://curl.haxx.se/rfc/cookie_spec.html
In 2011, RFC6265 (http://www.ietf.org/rfc/rfc6265.txt) was finally published
and details how cookies work within HTTP.
1.2 Cookies saved to disk
Netscape once created a file format for storing cookies on disk so that they
would survive browser restarts. curl adopted that file format to allow
sharing the cookies with browsers, only to see browsers move away from that
format. Modern browsers no longer use it, while curl still does.
The netscape cookie file format stores one cookie per physical line in the
file with a bunch of associated meta data, each field separated with
TAB. That file is called the cookiejar in curl terminology.
When libcurl saves a cookiejar, it creates a file header of its own in which
there is a URL mention that will link to the web version of this document.
1.3 Cookies with curl the command line tool
curl has a full cookie "engine" built in. If you just activate it, you can
have curl receive and send cookies exactly as mandated in the specs.
Command line options:
-b, --cookie
tell curl a file to read cookies from and start the cookie engine, or if
it isn't a file it will pass on the given string. -b name=var works and so
does -b cookiefile.
-j, --junk-session-cookies
when used in combination with -b, it will skip all "session cookies" on
load so as to appear to start a new cookie session.
-c, --cookie-jar
tell curl to start the cookie engine and write cookies to the given file
after the request(s)
1.4 Cookies with libcurl
libcurl offers several ways to enable and interface the cookie engine. These
options are the ones provided by the native API. libcurl bindings may offer
access to them using other means.
CURLOPT_COOKIE
Is used when you want to specify the exact contents of a cookie header to
send to the server.
CURLOPT_COOKIEFILE
Tell libcurl to activate the cookie engine, and to read the initial set of
cookies from the given file. Read-only.
CURLOPT_COOKIEJAR
Tell libcurl to activate the cookie engine, and when the easy handle is
closed save all known cookies to the given cookiejar file. Write-only.
CURLOPT_COOKIELIST
Provide detailed information about a single cookie to add to the internal
storage of cookies. Pass in the cookie as an HTTP header with all the
details set, or pass in a line from a netscape cookie file. This option
can also be used to flush the cookies etc.
CURLINFO_COOKIELIST
Extract cookie information from the internal cookie storage as a linked
list.
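A minimal sketch tying these options together (the file name and the handle
name "curl" are examples only, and <stdio.h> plus <curl/curl.h> are assumed
to be included; passing "" to CURLOPT_COOKIEFILE enables the engine without
reading any file):

  /* enable the cookie engine and read the initial cookies from a file */
  curl_easy_setopt(curl, CURLOPT_COOKIEFILE, "cookies.txt");

  /* write all known cookies to the same file when the handle is closed */
  curl_easy_setopt(curl, CURLOPT_COOKIEJAR, "cookies.txt");

  /* after a transfer, list the cookies currently held in memory */
  struct curl_slist *cookies = NULL;
  if(curl_easy_getinfo(curl, CURLINFO_COOKIELIST, &cookies) == CURLE_OK) {
    struct curl_slist *each = cookies;
    while(each) {
      printf("%s\n", each->data);  /* one netscape cookie file line each */
      each = each->next;
    }
    curl_slist_free_all(cookies);
  }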
1.5 Cookies with javascript
These days a lot of the web is built up by javascript. The web browser loads
complete programs that render the page you see. These javascript programs
can also set and access cookies.
Since curl and libcurl are plain HTTP clients without any knowledge of or
capability to handle javascript, such cookies will not be detected or used.
Often, if you want to mimic what a browser does on such web sites, you can
record web browser HTTP traffic when using such a site and then repeat the
cookie operations using curl or libcurl.

View File

@ -14,12 +14,6 @@ Installing Binary Packages
binary package. This document describes how to compile, build and install
curl and libcurl from source code.
Building from git
=================
If you get your code off a git repository, see the GIT-INFO file in the
root directory for specific instructions on how to proceed.
UNIX
====
A normal unix installation is made in three or four steps (after you've
@ -32,8 +26,8 @@ UNIX
You probably need to be root when doing the last command.
If you have checked out the sources from the git repository, read the
GIT-INFO on how to proceed.
If you have checked out the sources from the CVS repository, read the
CVS-INFO on how to proceed.
Get a full listing of all available configure options by invoking it like:
@ -135,28 +129,22 @@ UNIX
default. But if you want to alter it, you can select how to deal with
each individual library.
To build with GnuTLS for SSL/TLS, use both --without-ssl and
--with-gnutls.
To build with GnuTLS support instead of OpenSSL for SSL/TLS, note that
you need to use both --without-ssl and --with-gnutls.
To build with Cyassl for SSL/TLS, use both --without-ssl and
--with-cyassl.
To build with yassl support instead of OpenSSL or GnuTLS, you must build
yassl with its OpenSSL emulation enabled and point to that directory root
with configure --with-ssl.
To build with NSS for SSL/TLS, use both --without-ssl and --with-nss.
To build with PolarSSL for SSL/TLS, use both --without-ssl and
--with-polarssl.
To build with axTLS for SSL/TLS, use both --without-ssl and --with-axtls.
To build with NSS support instead of OpenSSL for SSL/TLS, note that
you need to use both --without-ssl and --with-nss.
To get GSSAPI support, build with --with-gssapi and have the MIT or
Heimdal Kerberos 5 packages installed.
To get support for SCP and SFTP, build with --with-libssh2 and have
libssh2 0.16 or later installed.
To get Metalink support, build with --with-libmetalink and have the
libmetalink packages installed.
SPECIAL CASES
-------------
Some versions of uClibc require configuring with CPPFLAGS=-D_GNU_SOURCE=1
@ -189,17 +177,11 @@ Win32
KB140584 - How to link with the correct C Run-Time (CRT) library
http://support.microsoft.com/kb/140584/en-us
KB190799 - Potential Errors Passing CRT Objects Across DLL Boundaries
http://msdn.microsoft.com/en-us/library/ms235460
If your app is misbehaving in some strange way, or it is suffering
from memory corruption, before asking for further help, please try
first to rebuild every single library your app uses as well as your
app using the debug multithreaded dynamic C runtime.
If you get linkage errors read section 5.7 of the FAQ document.
MingW32
-------
@ -217,12 +199,12 @@ Win32
If you have any problems linking libraries or finding header files, be sure
to verify that the provided "Makefile.m32" files use the proper paths, and
adjust as necessary. It is also possible to override these paths with
adjust as necessary. It is also possible to override these paths with
environment variables, for example:
set ZLIB_PATH=c:\zlib-1.2.7
set OPENSSL_PATH=c:\openssl-0.9.8x
set LIBSSH2_PATH=c:\libssh2-1.4.3
set ZLIB_PATH=c:\zlib-1.2.3
set OPENSSL_PATH=c:\openssl-0.9.8g
set LIBSSH2_PATH=c:\libssh2-0.17
ATTENTION: if you want to build with libssh2 support you have to use latest
version 0.17 - previous versions will NOT work with 7.17.0 and later!
@ -241,7 +223,7 @@ Win32
If you want to enable LDAPS support then set LDAPS=1.
- optional MingW32-built OpenLDAP SDK available from:
- optional MingW32-built OpenLDAP SDK available from:
http://www.gknw.net/mirror/openldap/
- optional recent Novell CLDAP SDK available from:
http://developer.novell.com/ndk/cldap.htm
@ -261,53 +243,6 @@ Win32
See the separate INSTALL.devcpp file for details.
MSVC 6 caveats
--------------
If you use MSVC 6 it is required that you use the February 2003 edition PSDK:
http://www.microsoft.com/msdownload/platformsdk/sdkupdate/psdk-full.htm
Building any software with MSVC 6 without having PSDK installed is just
asking for trouble down the road; once you have released it, you might notice
the problems at the first corner or ten miles ahead, depending mostly on your
choice of static vs dynamic runtime and third party libraries. Anyone using
software built in such way will at some point regret having done so.
When someone uses MSVC 6 without PSDK he is using a compiler dating back to
1998. If the compiler has been updated with the installation of a service pack
such as those mentioned in http://support.microsoft.com/kb/194022, it can be
safely used to translate source code into object code.
But, even with the service packs mentioned above installed, the resulting
software generated in such an environment will be using outdated system
header files and libraries with bugs and security issues which have already
been addressed and fixed long time ago.
In order to make use of the updated system headers and fixed libraries
for MSVC 6, it is required that 'Platform SDK', PSDK from now onwards,
is installed. The specific PSDK that must be installed for MSVC 6 is the
February 2003 edition, which is the latest one supporting the MSVC 6 compiler,
this PSDK is also known as 'Windows Server 2003 PSDK' and can be downloaded
from http://www.microsoft.com/msdownload/platformsdk/sdkupdate/psdk-full.htm
So, building curl and libcurl with MSVC 6 without PSDK is absolutely
discouraged for the benefit of anyone using software built in such
environment. And it will not be supported in any way, as we could just
be hunting bugs which have already been fixed way back in 2003.
When building with MSVC 6 we attempt to detect if PSDK is not being used,
and if this is the case the build process will fail hard with an error
message stating that the February 2003 PSDK is required. This is done to
protect the unsuspecting and avoid PEBKAC issues.
Additionally it might happen that a die hard MSVC hacker still wants to
build curl and libcurl with MSVC 6 without PSDK installed, even knowing
that this is a highly discouraged and unsupported build environment. In
this case the brave of heart will be able to build in such an environment
with the requisite of defining preprocessor symbol ALLOW_MSVC6_WITHOUT_PSDK
in lib/config-win32.h and knowing that LDAP and IPv6 support will be missing.
MSVC from command line
----------------------
@ -323,7 +258,7 @@ Win32
documentation on how to compile zlib. Define the ZLIB_PATH environment
variable to the location of zlib.h and zlib.lib, for example:
set ZLIB_PATH=c:\zlib-1.2.7
set ZLIB_PATH=c:\zlib-1.2.3
Then run 'nmake vc-zlib' in curl's root directory.
@ -337,7 +272,7 @@ Win32
Before running nmake define the OPENSSL_PATH environment variable with
the root/base directory of OpenSSL, for example:
set OPENSSL_PATH=c:\openssl-0.9.8x
set OPENSSL_PATH=c:\openssl-0.9.8g
Then run 'nmake vc-ssl' or 'nmake vc-ssl-dll' in curl's root
directory. 'nmake vc-ssl' will create a libcurl static and dynamic
@ -386,49 +321,30 @@ Win32
Borland C++ compiler
---------------------
Ensure that your build environment is properly set up to use the compiler
and associated tools. PATH environment variable must include the path to
bin subdirectory of your compiler installation, eg: c:\Borland\BCC55\bin
compile openssl
It is advisable to set environment variable BCCDIR to the base path of
the compiler installation.
Make sure you include the paths to curl/include and openssl/inc32 in
your bcc32.cnf file
set BCCDIR=c:\Borland\BCC55
eg : -I"c:\Bcc55\include;c:\path_curl\include;c:\path_openssl\inc32"
In order to build a plain vanilla version of curl and libcurl run the
following command from curl's root directory:
Check to make sure that all of the sources listed in lib/Makefile.b32
are present in the /path_to_curl/lib directory. (Check the src
directory for missing ones.)
make borland
Make sure the environment variable "BCCDIR" is set to the install
location for the compiler eg : c:\Borland\BCC55
To build curl and libcurl with zlib and OpenSSL support set environment
variables ZLIB_PATH and OPENSSL_PATH to the base subdirectories of the
already built zlib and OpenSSL libraries and from curl's root directory
run command:
command line:
make -f /path_to_curl/lib/Makefile-ssl.b32
make borland-ssl-zlib
The libcurl library will be built in the 'lib' subdirectory while the curl
tool is built in the 'src' subdirectory. In order to use the libcurl library
it is advisable to modify the compiler's configuration file bcc32.cfg located
in c:\Borland\BCC55\bin to reflect the location of the include paths, for
example the '-I' line could end up like:
-I"c:\Borland\BCC55\include;c:\curl\include;c:\openssl\inc32"
The bcc32.cfg '-L' line could also be modified to reflect the location of
the libcurl library, for example:
-L"c:\Borland\BCC55\lib;c:\curl\lib;c:\openssl\out32"
In order to build the sample program 'simple.c' from the docs\examples
subdirectory, run the following command from that subdirectory:
bcc32 simple.c libcurl.lib cw32mt.lib
In order to build sample program simplessl.c an SSL enabled libcurl
is required, as well as the OpenSSL libeay32.lib and ssleay32.lib
libraries.
compile simplessl.c with appropriate links
c:\curl\docs\examples\> bcc32 -L c:\path_to_curl\lib\libcurl.lib
-L c:\borland\bcc55\lib\psdk\ws2_32.lib
-L c:\openssl\out32\libeay32.lib
-L c:\openssl\out32\ssleay32.lib
simplessl.c
OTHER MSVC IDEs
---------------
@ -464,40 +380,12 @@ Win32
possibilities:
- Modify lib/config-win32.h
- Modify lib/curl_setup.h
- Modify lib/setup.h
- Modify lib/Makefile.vc6
- Add defines to Project/Settings/C/C++/General/Preprocessor Definitions
in the vc6libcurl.dsw/vc6libcurl.dsp Visual C++ 6 IDE project.
in the curllib.dsw/curllib.dsp Visual C++ 6 IDE project.
Using BSD-style lwIP instead of Winsock TCP/IP stack in Win32 builds
--------------------------------------------------------------------
In order to compile libcurl and curl using BSD-style lwIP TCP/IP stack
it is necessary to make definition of preprocessor symbol USE_LWIPSOCK
visible to libcurl and curl compilation processes. To set this definition
you have the following alternatives:
- Modify lib/config-win32.h and src/config-win32.h
- Modify lib/Makefile.vc6
- Add definition to Project/Settings/C/C++/General/Preprocessor Definitions
in the vc6libcurl.dsw/vc6libcurl.dsp Visual C++ 6 IDE project.
Once libcurl has been built with BSD-style lwIP TCP/IP stack support,
in order to use it with your program it is mandatory that your program
includes lwIP header file <lwip/opt.h> (or another lwIP header that includes
this) before including any libcurl header. Your program does not need the
USE_LWIPSOCK preprocessor definition which is for libcurl internals only.
Compilation has been verified with lwIP 1.4.0 and contrib-1.4.0 from:
http://download.savannah.gnu.org/releases/lwip/lwip-1.4.0.zip
http://download.savannah.gnu.org/releases/lwip/contrib-1.4.0.zip
This BSD-style lwIP TCP/IP stack support must be considered experimental
given that it has been verified that lwIP 1.4.0 still needs some polish,
and libcurl might yet need some additional adjustment, caveat emptor.
Important static libcurl usage note
-----------------------------------
@ -506,38 +394,6 @@ Win32
dynamic import symbols.
Apple iOS and Mac OS X
======================
On recent Apple operating systems, curl can be built to use Apple's
SSL/TLS implementation, Secure Transport, instead of OpenSSL. To build with
Secure Transport for SSL/TLS, use the configure option --with-darwinssl. (It
is not necessary to use the option --without-ssl.) This feature requires iOS
5.0 or later, or OS X 10.5 ("Leopard") or later.
When Secure Transport is in use, the curl options --cacert and --capath and
their libcurl equivalents, will be ignored, because Secure Transport uses
the certificates stored in the Keychain to evaluate whether or not to trust
the server. This, of course, includes the root certificates that ship with
the OS. The --cert and --engine options, and their libcurl equivalents, are
currently unimplemented in curl with Secure Transport.
For OS X users: In OS X 10.8 ("Mountain Lion"), Apple made a major
overhaul to the Secure Transport API that, among other things, added
support for the newer TLS 1.1 and 1.2 protocols. To get curl to support
TLS 1.1 and 1.2, you must build curl on Mountain Lion or later, or by
using the equivalent SDK. If you set the MACOSX_DEPLOYMENT_TARGET
environmental variable to an earlier version of OS X prior to building curl,
then curl will use the new Secure Transport API on Mountain Lion and later,
and fall back on the older API when the same curl binary is executed on
older cats. For example, running these commands in curl's directory in the
shell will build the code such that it will run on cats as old as OS X 10.6
("Snow Leopard") (using bash):
export MACOSX_DEPLOYMENT_TARGET="10.6"
./configure --with-darwinssl
make
IBM OS/2
========
Building under OS/2 is not much different from building under unix.
@ -575,7 +431,7 @@ VMS
Curl seems to work with FTP & HTTP; other protocols are not tested. (The
perl http/ftp testing server supplied as a testing tool cannot work on VMS
because VMS has no concept of fork(). [ I tried to give it a whack, but
that's of no use.
that's of no use.
SSL stuff has not been ported.
@ -707,8 +563,9 @@ NetWare
- optional OpenSSL sources (version 0.9.8 or later build with BSD sockets);
you can find precompiled packages at:
http://www.gknw.net/development/ossl/netware/
for CLIB-based builds OpenSSL 0.9.8h or later is required - earlier versions
don't support building with CLIB BSD sockets.
for CLIB-based builds OpenSSL needs to be patched to build with BSD
sockets (currently only a winsock-based CLIB build is supported):
http://www.gknw.net/development/ossl/netware/patches/v_0.9.8g/openssl-0.9.8g.diff
- optional SSH2 sources (version 0.17 or later);
Set a search path to your compiler, linker and tools; on Linux make
@ -726,10 +583,10 @@ NetWare
a 'set | grep OSTYPE' shows the var present and set; I simply overwrote it
with 'OSTYPE=linux-rh9-gnu' and the detection in the Makefile worked...
Any help in testing appreciated!
Builds automatically created 8 times a day from current git are here:
Builds automatically created 8 times a day from current CVS are here:
http://www.gknw.net/mirror/curl/autobuilds/
the status of these builds can be viewed at the autobuild table:
http://curl.haxx.se/dev/builds.html
http://curl.haxx.se/auto/
eCos
@ -766,12 +623,12 @@ eCos
of running curl in this way is the contents of the configuration file
printed to the console.
--- src/main.c 19 Jul 2006 19:09:56 -0000 1.363
+++ src/main.c 24 Jul 2006 21:37:23 -0000
--- src/main.c 19 Jul 2006 19:09:56 -0000 1.363
+++ src/main.c 24 Jul 2006 21:37:23 -0000
@@ -4286,11 +4286,31 @@
}
+#ifdef __ECOS
+#include <cyg/fileio/fileio.h>
+MTAB_ENTRY( testfs_mte1,
@ -780,7 +637,7 @@ eCos
+ "",
+ 0);
+#endif
int main(int argc, char *argv[])
{
int res;
@ -798,40 +655,37 @@ eCos
+ }
+#endif
memset(&config, 0, sizeof(struct Configurable));
config.errors = stderr; /* default errors to stderr */
Minix
=====
curl can be compiled on Minix 3 using gcc or ACK (starting with
ver. 3.1.3). Ensure that GNU gawk and bash are both installed and
available in the PATH.
ver. 3.1.3).
ACK
---
Increase the heap sizes of the compiler with the command:
binsizes xxl
then configure and compile curl with:
Configure and compile with:
./configure CC=cc LD=cc AR=/usr/bin/aal GREP=grep \
CPPFLAGS='-D_POSIX_SOURCE=1 -I/usr/local/include'
./configure CONFIG_SHELL=/bin/bigsh CC=cc LD=cc AR=/usr/bin/aal \
GREP=grep CPPFLAGS=-D_POSIX_SOURCE=1
make
chmem =256000 src/curl
GCC
---
Make sure gcc is in your PATH with the command:
export PATH=/usr/gnu/bin:$PATH
then configure and compile curl with:
./configure CC=gcc AR=/usr/gnu/bin/gar GREP=grep
./configure CONFIG_SHELL=/bin/bigsh CC=gcc AR=/usr/gnu/bin/gar GREP=grep
make
chmem =256000 src/curl
Symbian OS
@ -842,77 +696,9 @@ Symbian OS
bldmake bldfiles
abld build
to compile and install curl and libcurl using SBSv1. If your Symbian
SDK doesn't include support for P.I.P.S., you will need to contact
your SDK vendor to obtain that first.
VxWorks
========
Build for VxWorks is performed using cross compilation.
That means you build on Windows machine using VxWorks tools and
run the built image on the VxWorks device.
To build libcurl for VxWorks you need:
- CYGWIN (free, http://cygwin.com/)
- Wind River Workbench (commercial)
If you have CYGWIN and Workbench installed on your machine,
follow these steps:
1. Open the Command Prompt window and change directory ('cd')
to the libcurl 'lib' folder.
2. Add CYGWIN 'bin' folder to the PATH environment variable.
For example, type 'set PATH=C:/embedded/cygwin/bin;%PATH%'.
3. Adjust environment variables defined in 'Environment' section
of the Makefile.vxworks file to point to your software folders.
4. Build the libcurl by typing 'make -f ./Makefile.vxworks'
As a result the libcurl.a library should be created in the 'lib' folder.
To clean the build results type 'make -f ./Makefile.vxworks clean'.
Android
=======
Method using the static makefile:
- see the build notes in the Android.mk file.
Method using a configure cross-compile (tested with Android NDK r7c, r8):
- prepare the toolchain of the Android NDK for standalone use; this can
be done by invoking the script:
./build/tools/make-standalone-toolchain.sh
which creates a usual cross-compile toolchain. Let's assume that you put
this toolchain below /opt; then invoke configure with something like:
export PATH=/opt/arm-linux-androideabi-4.4.3/bin:$PATH
./configure --host=arm-linux-androideabi [more configure options]
make
- if you want to compile directly from our GIT repo you might run into
this issue with older automake stuff:
checking host system type...
Invalid configuration `arm-linux-androideabi':
system `androideabi' not recognized
configure: error: /bin/sh ./config.sub arm-linux-androideabi failed
this issue can be fixed with using more recent versions of config.sub
and config.guess which can be obtained here:
http://git.savannah.gnu.org/gitweb/?p=config.git;a=tree
you need to replace your system-own versions which usually can be
found in your automake folder:
find /usr -name config.sub
Wrapper for pkg-config
- In order to make proper use of pkg-config so that configure is able to
find all dependencies you should create a wrapper script for pkg-config;
file /opt/arm-linux-androideabi-4.4.3/bin/arm-linux-androideabi-pkg-config:
#!/bin/sh
SYSROOT=$(dirname ${0%/*})/sysroot
export PKG_CONFIG_DIR=
export PKG_CONFIG_LIBDIR=${SYSROOT}/usr/local/lib/pkgconfig:${SYSROOT}/usr/share/pkgconfig
export PKG_CONFIG_SYSROOT_DIR=${SYSROOT}
exec pkg-config "$@"
also create a copy or symlink with name arm-unknown-linux-androideabi-pkg-config.
to compile and install curl and libcurl. If your Symbian SDK doesn't
include support for P.I.P.S., you will need to contact your SDK vendor
to obtain that first.
CROSS COMPILE
@ -944,10 +730,10 @@ CROSS COMPILE
export NM=ppc_405-nm
./configure --target=powerpc-hardhat-linux \
--host=powerpc-hardhat-linux \
--build=i586-pc-linux-gnu \
--prefix=/opt/hardhat/devkit/ppc/405/target/usr/local \
--exec-prefix=/usr/local
--host=powerpc-hardhat-linux \
--build=i586-pc-linux-gnu \
--prefix=/opt/hardhat/devkit/ppc/405/target/usr/local \
--exec-prefix=/usr/local
(end script)
@ -996,14 +782,9 @@ REDUCING SIZE
--without-ssl (disables support for SSL/TLS)
--without-zlib (disables support for on-the-fly decompression)
The GNU compiler and linker have a number of options that can reduce the
size of the libcurl dynamic libraries on some platforms even further.
Specify them by providing appropriate CFLAGS and LDFLAGS variables on the
configure command-line, e.g.
CFLAGS="-Os -ffunction-sections -fdata-sections \
-fno-unwind-tables -fno-asynchronous-unwind-tables" \
LDFLAGS="-Wl,-s -Wl,-Bsymbolic -Wl,--gc-sections"
The GNU linker has a number of options to reduce the size of the libcurl
dynamic libraries on some platforms even further. Specify them by giving
the options -Wl,-Bsymbolic and -Wl,-s on the gcc command-line.
Be sure also to strip debugging symbols from your binaries after
compiling using 'strip' (or the appropriate variant if cross-compiling).
If space is really tight, you may be able to remove some unneeded
@ -1011,9 +792,9 @@ REDUCING SIZE
.comment section).
Using these techniques it is possible to create a basic HTTP-only shared
libcurl library for i386 Linux platforms that is only 106 KiB in size, and
an FTP-only library that is 108 KiB in size (as of libcurl version 7.27.0,
using gcc 4.6.3).
libcurl library for i386 Linux platforms that is only 94 KiB in size, and
an FTP-only library that is 87 KiB in size (as of libcurl version 7.19.1,
using gcc 4.2.2).
You may find that statically linking libcurl to your application will
result in a lower total size than dynamically linking.
@ -1045,15 +826,9 @@ PORTS
- Alpha OpenVMS V7.1-1H2
- Alpha Tru64 v5.0 5.1
- AVR32 Linux
- ARM Android 1.5, 2.1
- ARM INTEGRITY
- ARM iOS
- Cell Linux
- Cell Cell OS
- HP-PA HP-UX 9.X 10.X 11.X
- HP-PA Linux
- HP3000 MPE/iX
- MicroBlaze uClinux
- MIPS IRIX 6.2, 6.5
- MIPS Linux
- OS/400
@ -1073,6 +848,7 @@ PORTS
- StrongARM (and other ARM) RISC OS 3.1, 4.02
- StrongARM/ARM7/ARM9 Linux 2.4, 2.6
- StrongARM NetBSD 1.4.1
- ARM INTEGRITY
- Symbian OS (P.I.P.S.) 9.x
- TPF
- Ultrix 4.3a
@ -1085,7 +861,6 @@ PORTS
- i386 HURD
- i386 Haiku OS
- i386 Linux 1.3, 2.0, 2.2, 2.3, 2.4, 2.6
- i386 Mac OS X
- i386 MINIX 3.1
- i386 NetBSD
- i386 Novell NetWare
@ -1103,28 +878,16 @@ PORTS
- m68k OpenBSD
- m88k dg-dgux5.4R3.00
- s390 Linux
- x86_64 Linux
- XScale/PXA250 Linux 2.4
- Nios II uClinux
Useful URLs
===========
axTLS http://axtls.sourceforge.net/
c-ares http://c-ares.haxx.se/
GNU GSS http://www.gnu.org/software/gss/
GnuTLS http://www.gnu.org/software/gnutls/
Heimdal http://www.pdc.kth.se/heimdal/
libidn http://www.gnu.org/software/libidn/
libssh2 http://www.libssh2.org/
MIT Kerberos http://web.mit.edu/kerberos/www/dist/
NSS http://www.mozilla.org/projects/security/pki/nss/
OpenLDAP http://www.openldap.org/
OpenSSL http://www.openssl.org/
PolarSSL http://polarssl.org/
yassl http://www.yassl.com/
Zlib http://www.zlib.net/
OpenSSL http://www.openssl.org
MingW http://www.mingw.org
OpenLDAP http://www.openldap.org
Zlib http://www.gzip.org/zlib/
libssh2 http://www.libssh2.org
MingW http://www.mingw.org/
MinGW-w64 http://mingw-w64.sourceforge.net/
OpenWatcom http://www.openwatcom.org/

View File

@ -1,13 +1,13 @@
DevCpp-Mingw Install & Compilation Sept 2005
==================================
Reference Emails available at curl@haxx.se:
Reference Emails available at curl@haxx.se:
Libcurl Install and Use Issues
Awaiting an Answer for Win 32 Install
Awaiting an Answer for Win 32 Install
res = curl_easy_perform(curl); Error
Makefile Issues
Having previously done a thorough review of what was available that met my
requirements under GPL, I settled for Libcurl as the software of choice for
@ -26,7 +26,7 @@ exists for a Unix/linux command line environments. This is of little help when
it comes to Windows O/S.
Secondly the help that does exist for the Windows O/S focused around mingw
through a command line argument environment.
through a command line argument environment.
You may ask "Why is this a problem?"
@ -110,7 +110,7 @@ Next read the contents of Makefile.m32. It includes instructions on its use.
Method I - DOS Command Line
---------------------------
Note - The only reason I have included this method is that Method II which is
the preferred method for compiling does not allow for the setting of option
switches (e.g. SSL = 1 or SSL =0). At least that's what they tell me at the
@ -191,7 +191,7 @@ the contents of the LIB Folder. If not go there.
files and close box. Wait till all files are added. This may take 30 seconds
or longer.
9- Drop the Menu Execute/Click on Compile.
9- Drop the Menu Execute/Click on Compile.
10- That's it.
@ -269,7 +269,7 @@ added. Otherwise you may experience link errors.
2- Don't forget to include #include "curl/curl.h".
e.g.
#define CURL_STATICLIB
#define CURL_STATICLIB
#include <windows.h>
#include "curl/curl.h"
#include <fstream>

View File

@ -1,7 +1,7 @@
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
| (__| |_| | _ <| |___
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
| (__| |_| | _ <| |___
\___|\___/|_| \_\_____|
INTERNALS
@ -12,10 +12,10 @@ INTERNALS
The largest amount of code and complexity is in the library part.
GIT
CVS
===
All changes to the sources are committed to the git repository as soon as
they're somewhat verified to work. Changes shall be committed as independently
All changes to the sources are committed to the CVS repository as soon as
they're somewhat verified to work. Changes shall be committed as independently
as possible so that individual changes can be easier spotted and tracked
afterwards.
@ -37,16 +37,18 @@ Portability
GnuTLS 1.2
zlib 1.1.4
libssh2 0.16
c-ares 1.6.0
c-ares 1.5.0
libidn 0.4.1
cyassl 2.0.0
*yassl 1.4.0 (http://curl.haxx.se/mail/lib-2008-02/0093.html)
openldap 2.0
MIT krb5 lib 1.2.4
qsossl V5R2M0
NSS 3.12.x
axTLS 1.2.7
NSS 3.11.x
Heimdal ?
* = only partly functional, but that's due to bugs in the third party lib, not
because of libcurl code
On systems where configure runs, we aim at working on them all - if they have
a suitable C compiler. On systems that don't run configure, we strive to keep
curl running fine on:
@ -65,7 +67,7 @@ Portability
GNU Autoconf 2.57
GNU Automake 1.7 (we currently avoid 1.10 due to Solaris-related bugs)
GNU M4 1.4
perl 5.004
perl 4
roffit 0.5
groff ? (any version that supports "groff -Tps -man [in] [out]")
ps2pdf (gs) ?
@ -88,7 +90,7 @@ Windows vs Unix
do it etc there might be reasons for applications to alter that behaviour.
3. The file descriptors for network communication and file operations are
not easily interchangeable as in unix.
not easily interchangeable as in unix.
We avoid this by not trying any funny tricks on file descriptors.
@ -101,9 +103,9 @@ Windows vs Unix
Inside the source code, We make an effort to avoid '#ifdef [Your OS]'. All
conditionals that deal with features *should* instead be in the format
'#ifdef HAVE_THAT_WEIRD_FUNCTION'. Since Windows can't run configure scripts,
we maintain a curl_config-win32.h file in lib directory that is supposed to
look exactly as a curl_config.h file would have looked like on a Windows
machine!
we maintain two config-win32.h files (one in lib/ and one in src/) that are
supposed to look exactly as a config.h file would have looked like on a
Windows machine!
Generally speaking: always remember that this will be compiled on dozens of
operating systems. Don't walk on the edge.
@ -180,7 +182,7 @@ Library
Some time during the DO function, the Curl_setup_transfer() function must
be called with some basic info about the upcoming transfer: what socket(s)
to read/write and the expected file transfer sizes (if known).
to read/write and the expected file transfer sizes (if known).
o Transfer()
@ -217,20 +219,20 @@ Library
done" loop. It loops if there's a Location: to follow.
When completed, the curl_easy_cleanup() should be called to free up used
resources. It runs Curl_disconnect() on all open connections.
resources. It runs Curl_disconnect() on all open connections.
A quick roundup on internal function sequences (many of these call
protocol-specific function-pointers):
Curl_connect - connects to a remote site and does initial connect fluff
curl_connect - connects to a remote site and does initial connect fluff
This also checks for an existing connection to the requested site and uses
that one if it is possible.
Curl_do - starts a transfer
Curl_handler::do_it() - transfers data
Curl_done - ends a transfer
curl_do - starts a transfer
curl_transfer() - transfers data
curl_done - ends a transfer
Curl_disconnect - disconnects from a remote site. This is called when the
curl_disconnect - disconnects from a remote site. This is called when the
disconnect is really requested, which doesn't necessarily have to be
exactly after curl_done in case we want to keep the connection open for
a while.
@ -247,16 +249,16 @@ Library
HTTPS uses in almost every means the same procedure as HTTP, with only two
exceptions: the connect procedure is different and the function used to read
or write from the socket is different, although the latter fact is hidden in
the source by the use of Curl_read() for reading and Curl_write() for writing
the source by the use of curl_read() for reading and curl_write() for writing
data to the remote server.
http_chunks.c contains functions that understands HTTP 1.1 chunked transfer
encoding.
An interesting detail with the HTTP(S) request, is the Curl_add_buffer()
series of functions we use. They append data to one single buffer, and when
the building is done the entire request is sent off in one single write. This
is done this way to overcome problems with flawed firewalls and lame servers.
An interesting detail with the HTTP(S) request, is the add_buffer() series of
functions we use. They append data to one single buffer, and when the
building is done the entire request is sent off in one single write. This is
done this way to overcome problems with flawed firewalls and lame servers.
FTP
@ -282,14 +284,14 @@ Library
LDAP
Everything LDAP is in lib/ldap.c and lib/openldap.c
Everything LDAP is in lib/ldap.c.
GENERAL
URL encoding and decoding, called escaping and unescaping in the source code,
is found in lib/escape.c.
While transferring data in Transfer() a few functions might get used.
While transferring data in Transfer() a few functions might get used.
curl_getdate() in lib/parsedate.c is for HTTP date comparisons (and more).
lib/getenv.c offers curl_getenv() which is for reading environment variables
@ -301,8 +303,8 @@ Library
lib/netrc.c holds the .netrc parser
lib/timeval.c features replacement functions for systems that don't have
gettimeofday() and a few support functions for timeval conversions.
gettimeofday() and a few support functions for timeval conversions.
A function named curl_version() that returns the full curl version string is
found in lib/version.c.
@ -354,10 +356,8 @@ multi interface/non-blocking
The FTP and the SFTP/SCP protocols are thus perfect examples of how we adapt
and adjust the code to allow non-blocking operations even on multi-stage
protocols. They are built around state machines that return when they could
block waiting for data. The DICT, LDAP and TELNET protocols are crappy
examples and they are subject for rewrite in the future to better fit the
libcurl protocol family.
protocols. The DICT, TELNET and TFTP are crappy examples and they are subject
for rewrite in the future to better fit the libcurl protocol family.
SSL libraries
=============
@ -375,13 +375,11 @@ SSL libraries
Library Symbols
===============
All symbols used internally in libcurl must use a 'Curl_' prefix if they're
used in more than a single file. Single-file symbols must be made static.
Public ("exported") symbols must use a 'curl_' prefix. (There are exceptions,
but they are to be changed to follow this pattern in future versions.) Public
API functions are marked with CURL_EXTERN in the public header files so that
all others can be hidden on platforms where this is possible.
but they are to be changed to follow this pattern in future versions.)
Return Codes and Informationals
===============================
@ -410,10 +408,10 @@ Client
main() resides in src/main.c together with most of the client code.
src/tool_hugehelp.c is automatically generated by the mkhelp.pl perl script
to display the complete "manual" and the src/urlglob.c file holds the
functions used for the URL-"globbing" support. Globbing in the sense that
the {} and [] expansion stuff is there.
src/hugehelp.c is automatically generated by the mkhelp.pl perl script to
display the complete "manual" and the src/urlglob.c file holds the functions
used for the URL-"globbing" support. Globbing in the sense that the {} and []
expansion stuff is there.
The client mostly messes around to setup its 'config' struct properly, then
it calls the curl_easy_*() functions of the library and when it gets back
@ -438,65 +436,39 @@ Memory Debugging
information about what they just did. The logged data can then be analyzed
after a complete session.
memanalyze.pl is the perl script present in tests/ that analyzes a log file
generated by the memory tracking system. It detects if resources are
allocated but never freed and other kinds of errors related to resource
management.
memanalyze.pl is the perl script present only in CVS (not part of the
release archives) that analyzes a log file generated by the memdebug
system. It detects if resources are allocated but never freed and other kinds
of errors related to resource management.
Internally, definition of preprocessor symbol DEBUGBUILD restricts code which
is only compiled for debug enabled builds. And symbol CURLDEBUG is used to
differentiate code which is _only_ used for memory tracking/debugging.
Use -DCURLDEBUG when compiling to enable memory debugging, this is also
switched on by running configure with --enable-curldebug. Use -DDEBUGBUILD
when compiling to enable a debug build or run configure with --enable-debug.
curl --version will list 'Debug' feature for debug enabled builds, and
will list 'TrackMemory' feature for curl debug memory tracking capable
builds. These features are independent and can be controlled when running
the configure script. When --enable-debug is given both features will be
enabled, unless some restriction prevents memory tracking from being used.
Use -DMALLOCDEBUG when compiling to enable memory debugging, this is also
switched on by running configure with --enable-debug.
Test Suite
==========
The test suite is placed in its own subdirectory directly off the root in the
curl archive tree, and it contains a bunch of scripts and a lot of test case
data.
Since November 2000, a test suite has evolved. It is placed in its own
subdirectory directly off the root in the curl archive tree, and it contains
a bunch of scripts and a lot of test case data.
The main test script is runtests.pl that will invoke test servers like
The main test script is runtests.pl that will invoke the two servers
httpserver.pl and ftpserver.pl before all the test cases are performed. The
test suite currently only runs on unix-like platforms.
You'll find a description of the test suite in the tests/README file, and the
test case data files in the tests/FILEFORMAT file.
You'll find a complete description of the test case data files in the
tests/README file.
The test suite automatically detects if curl was built with the memory
debugging enabled, and if it was it will detect memory leaks, too.
debugging enabled, and if it was it will detect memory leaks too.
Building Releases
=================
There's no magic to this. When you consider everything stable enough to be
released, do this:
released, run the 'maketgz' script (using 'make distcheck' will give you a
pretty good view on the status of the current sources). maketgz prompts for
version number of the client and the library before it creates a release
archive. maketgz uses 'make dist' for the actual archive building, which is
why you need to fill in the Makefile.am files properly to specify which files
should be included in the release archives.
1. Tag the source code accordingly.
2. run the 'maketgz' script (using 'make distcheck' will give you a pretty
good view on the status of the current sources). maketgz requires a
version number and creates the release archive. maketgz uses 'make dist'
for the actual archive building, which is why you need to fill in the
Makefile.am files properly to specify which files should be included in the
release archives.
3. When that's complete, sign the output files.
4. Upload
5. Update web site and changelog on site
6. Send announcement to the mailing lists
NOTE: you must have curl checked out from git to be able to do a proper
release build. The release tarballs do not have everything setup in order to
do releases properly.

View File

@ -3,85 +3,7 @@ join in and help us correct one or more of these! Also be sure to check the
changelog of the current development status, as one or more of these problems
may have been fixed since this was written!
80. Curl doesn't recognize certificates in DER format in keychain, but it
works with PEM.
http://curl.haxx.se/bug/view.cgi?id=3439999
79. SMTP. When sending data to multiple recipients, curl will abort and return
failure if one of the recipients indicates failure (on the "RCPT TO"
command). Ordinary mail programs would proceed and still send to the ones
that can receive data. This is subject for change in the future.
http://curl.haxx.se/bug/view.cgi?id=3438362
78. curl and libcurl don't always signal the client properly when "sending"
zero bytes files - it makes for example the command line client not creating
any file at all. Like when using FTP.
http://curl.haxx.se/bug/view.cgi?id=3438362
77. CURLOPT_FORBID_REUSE on a handle prevents NTLM from working since it
"abuses" the underlying connection re-use system and if connections are
forced to close they break the NTLM support.
76. The SOCKET type in Win64 is 64 bits large (and thus so is curl_socket_t on
that platform), and long is only 32 bits. It makes it impossible for
curl_easy_getinfo() to return a socket properly with the CURLINFO_LASTSOCKET
option as for all other operating systems.
75. NTLM authentication involving unicode user name or password only works
properly if built with UNICODE defined together with the schannel/winssl
backend. The original problem was mentioned in:
http://curl.haxx.se/mail/lib-2009-10/0024.html
http://curl.haxx.se/bug/view.cgi?id=2944325
The schannel version verified to work as mentioned in
http://curl.haxx.se/mail/lib-2012-07/0073.html
73. if a connection is made to a FTP server but the server then just never
sends the 220 response or otherwise is dead slow, libcurl will not
acknowledge the connection timeout during that phase but only the "real"
timeout - which may surprise users as it is probably considered to be the
connect phase to most people. Brought up (and is being misunderstood) in:
http://curl.haxx.se/bug/view.cgi?id=2844077
72. "Pausing pipeline problems."
http://curl.haxx.se/mail/lib-2009-07/0214.html
70. Problem re-using easy handle after call to curl_multi_remove_handle
http://curl.haxx.se/mail/lib-2009-07/0249.html
68. "More questions about ares behavior".
http://curl.haxx.se/mail/lib-2009-08/0012.html
67. When creating multipart formposts. The file name part can be encoded with
something beyond ascii but currently libcurl will only pass in the verbatim
string the app provides. There are several browsers that already do this
encoding. The key seems to be the updated draft to RFC2231:
http://tools.ietf.org/html/draft-reschke-rfc2231-in-http-02
66. When using telnet, the time limitation options don't work.
http://curl.haxx.se/bug/view.cgi?id=2818950
65. When doing FTP over a socks proxy or CONNECT through HTTP proxy and the
multi interface is used, libcurl will fail if the (passive) TCP connection
for the data transfer isn't more or less instant as the code does not
properly wait for the connect to be confirmed. See test case 564 for a first
shot at a test case.
63. When CURLOPT_CONNECT_ONLY is used, the handle cannot reliably be re-used
for any further requests or transfers. The work-around is then to close that
handle with curl_easy_cleanup() and create a new. Some more details:
http://curl.haxx.se/mail/lib-2009-04/0300.html
61. If an upload using Expect: 100-continue receives an HTTP 417 response,
it ought to be automatically resent without the Expect:. A workaround is
for the client application to redo the transfer after disabling Expect:.
http://curl.haxx.se/mail/archive-2008-02/0043.html
60. libcurl closes the connection if an HTTP 401 reply is received while it
is waiting for the 100-continue response.
http://curl.haxx.se/mail/lib-2008-08/0462.html
58. It seems sensible to be able to use CURLOPT_NOBODY and
58. It seems sensible to be able to use CURLOPT_NOBODY and
CURLOPT_FAILONERROR with FTP to detect if a file exists or not, but it is
not working: http://curl.haxx.se/mail/lib-2008-07/0295.html
@ -103,9 +25,14 @@ may have been fixed since this was written!
library header files exporting symbols/macros that should be kept private
to the KfW library. See ticket #5601 at http://krbdev.mit.edu/rt/
53. SFTP busy-loop problem. When doing SFTP uploads, we can see that libcurl
occasionally will busy-loop while waiting for certain network conditions.
Reported by Pavel Shalagin, explained somewhat by Daniel Stenberg here:
http://curl.haxx.se/mail/lib-2008-04/0439.html
52. Gautam Kachroo's issue that identifies a problem with the multi interface
where a connection can be re-used without actually being properly
SSL-negotiated:
SSL-negotiated:
http://curl.haxx.se/mail/lib-2008-01/0277.html
49. If using --retry and the transfer timeouts (possibly due to using -m or
@ -120,6 +47,13 @@ may have been fixed since this was written!
function will return prematurely and will confuse the rest of the HTTP
protocol code. This should be very rare.
45. libcurl built to support ipv6 uses getaddrinfo() to resolve host names.
getaddrinfo() sorts the response list which effectively kills how libcurl
deals with round-robin DNS entries. All details:
http://curl.haxx.se/mail/lib-2007-07/0168.html
initial suggested function to use for randomizing the response:
http://curl.haxx.se/mail/lib-2007-07/0178.html
43. There seems to be a problem when connecting to the Microsoft telnet server.
http://curl.haxx.se/bug/view.cgi?id=1720605
@ -134,6 +68,13 @@ may have been fixed since this was written!
38. Kumar Swamy Bhatt's problem in ftp/ssl "LIST" operation:
http://curl.haxx.se/mail/lib-2007-01/0103.html
37. Having more than one connection to the same host when doing NTLM
authentication (which performs multiple "passes" and authenticates a
connection rather than an HTTP request), and particularly when using the
multi interface, there's a risk that libcurl will re-use a wrong connection
when doing the different passes in the NTLM negotiation and thus fail to
negotiate (in seemingly mysterious ways).
35. Both SOCKS5 and SOCKS4 proxy connections are done blocking, which is very
bad when used with the multi interface.
@ -148,13 +89,12 @@ may have been fixed since this was written!
30. You need to use -g to the command line tool in order to use RFC2732-style
IPv6 numerical addresses in URLs.
29. IPv6 URLs with zone ID is not nicely supported.
29. IPv6 URLs with zone ID is not supported.
http://www.ietf.org/internet-drafts/draft-fenner-literal-zone-02.txt (expired)
specifies the use of a plus sign instead of a percent when specifying zone
IDs in URLs to get around the problem of percent signs being
special. According to the reporter, Firefox deals with the URL _with_ a
percent letter (which seems like a blatant URL spec violation).
libcurl supports zone IDs where the percent sign is URL-escaped (i.e. %25).
See http://curl.haxx.se/bug/view.cgi?id=1371118
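A sketch of such a URL-escaped zone ID on the command line, using -g as item 30
above requires for bracketed IPv6 addresses (the interface name is only an
example):
curl -g "http://[fe80::1%25eth0]/"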
@ -163,6 +103,7 @@ may have been fixed since this was written!
to what winhttp does. See http://curl.haxx.se/bug/view.cgi?id=1281867
23. SOCKS-related problems:
A) libcurl doesn't support SOCKS for IPv6.
B) libcurl doesn't support FTPS over a SOCKS proxy.
E) libcurl doesn't support active FTP over a SOCKS proxy
@ -174,7 +115,7 @@ may have been fixed since this was written!
to VMS file structures and the perceived file sizes stat() returns. A
possible fix would involve sending a "STRU VMS" command.
http://curl.haxx.se/bug/view.cgi?id=1156287
21. FTP ASCII transfers do not follow RFC959. They don't convert the data
accordingly (not for sending nor for receiving). RFC 959 section 3.1.1.1
clearly describes how this should be done:
@ -196,7 +137,7 @@ may have been fixed since this was written!
would not meaningfully support NUL characters within RFC 959 <string>,
anyway (e.g., UNIX pathnames may not contain NUL).
14. Test case 165 might fail on a system which has libidn present, but with an
old iconv version (2.1.3 is a known bad version), since it doesn't recognize
the charset when named ISO8859-1. Changing the name to ISO-8859-1 makes the
test pass, but instead makes it fail on Solaris hosts that use its native

View File

@ -55,10 +55,6 @@ NSS http://www.mozilla.org/projects/security/pki/nss/
grant you different permissions and impose different obligations. You
should select the license that best meets your needs.
axTLS http://axtls.sourceforge.net/
(May be used for SSL/TLS support) Uses a Modified BSD-style license.
c-ares http://daniel.haxx.se/projects/c-ares/license.html
(Used for asynchronous name resolves) Uses an MIT license that is very

View File

@ -1,228 +0,0 @@
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
| (__| |_| | _ <| |___
\___|\___/|_| \_\_____|
MAIL ETIQUETTE
1. About the lists
1.1 Mailing Lists
1.2 Netiquette
1.3 Do Not Mail a Single Individual
1.4 Subscription Required
1.5 Moderation of new posters
1.6 Handling trolls and spam
1.7 How to unsubscribe
2. Sending mail
2.1 Reply or New Mail
2.2 Reply to the List
2.3 Use a Sensible Subject
2.4 Do Not Top-Post
2.5 HTML is not for mails
2.6 Quoting
2.7 Digest
2.8 Please Tell Us How You Solved The Problem!
==============================================================================
1. About the lists
1.1 Mailing Lists
The mailing lists we have are all listed and described at
http://curl.haxx.se/mail/
Each mailing list is targeted to a specific set of users and subjects,
please use the one or the ones that suit you the most.
Each mailing list has hundreds up to thousands of readers, meaning that
each mail sent will be received and read by a very large number of people.
People from various cultures, regions, religions and continents.
1.2 Netiquette
Netiquette is a common name for how to behave on the internet. Of course, in
each particular group and subculture there will be differences in what is
acceptable and what is considered good manners.
This document outlines what we in the cURL project consider to be good
etiquette, and it focuses primarily on how to behave on and how to use our
mailing lists.
1.3 Do Not Mail a Single Individual
Many people send one question to one person. One person gets many mails, and
there is only one person who can give you a reply. The question may be
something that other people also want to ask. Those other people have no way
to read the reply except to ask the one person the question themselves, and
the one person consequently gets overloaded with mail.
If you really want to contact an individual and perhaps pay for his or her
services, by all means go ahead, but if it's just another curl question,
take it to a suitable list instead.
1.4 Subscription Required
All curl mailing lists require that you are subscribed to allow a mail to go
through to all the subscribers.
If you post without being subscribed (or from a different mail address than
the one you are subscribed with), your mail will simply be silently
discarded. You have to subscribe first, then post.
The reason for this unfortunate and strict subscription policy is of course
to stop spam from pestering the lists.
1.5 Moderation of new posters
Several of the curl mailing lists automatically make all posts from new
subscribers require moderation. This means that after you've subscribed and
sent your first mail to a list, that mail will not be let through to the
list until a mailing list administrator has verified that it is OK and
permits it to get posted.
Once a first post has been made that proves the sender is actually talking
about curl-related subjects, the moderation "flag" will be switched off and
future posts will go through without being moderated.
The reason for this moderation policy is that we do suffer from spammers who
actually subscribe and send spam to our lists.
1.6 Handling trolls and spam
Despite our good intentions and hard work to keep spam off the lists and to
maintain a friendly and positive atmosphere, there will be times when spam
and or trolls get through.
Troll - "someone who posts inflammatory, extraneous, or off-topic messages
in an online community"
Spam - "use of electronic messaging systems to send unsolicited bulk
messages"
No matter what, we NEVER EVER respond to trolls or spammers on the list. If
you believe the list admin should do something particular, contact him/her
off-list. The subject will be taken care of as well as possible to prevent
repeated offences, but responding on the list to such messages never leads to
anything good and only puts the spotlight even more on the offender: which was
the entire purpose of it getting to the list in the first place.
Don't feed the trolls!
1.7 How to unsubscribe
You unsubscribe the same way you subscribed in the first place. You go to
the page for the particular mailing list you're subscribed to and you enter
your email address and password and press the unsubscribe button.
Also, this information is included in the headers of every mail that is sent
out to all curl related mailing lists and there's a footer in each mail that
links to the "admin" page on which you can unsubscribe and change other
options.
You NEVER EVER email the mailing list requesting someone else to get you off
the list.
2. Sending mail
2.1 Reply or New Mail
Please do not reply to an existing message as a short-cut to post a message
to the lists.
Many mail programs and web archivers use information within mails to keep
them together as "threads", as collections of posts that discuss a certain
subject. If you don't intend to reply on the same or similar subject, don't
just hit reply on an existing mail and change subject, create a new mail.
2.2 Reply to the List
When replying to a message from the list, make sure that you do "group
reply" or "reply to all", and not just reply to the author of the single
mail you reply to.
We're actively discouraging replying back to the single person by setting
the Reply-To: field in outgoing mails back to the mailing list address,
making it harder for people to mail the author only by mistake.
2.3 Use a Sensible Subject
Please use a subject of the mail that makes sense and that is related to the
contents of your mail. It makes it a lot easier to find your mail afterwards
and it makes it easier to track mail threads and topics.
2.4 Do Not Top-Post
If you reply to a message, don't use top-posting. Top-posting is when you
write the new text at the top of a mail and you insert the previous quoted
mail conversation below. It forces users to read the mail in a backwards
order to properly understand it.
This is why top posting is so bad:
A: Because it messes up the order in which people normally read
text.
Q: Why is top-posting such a bad thing?
A: Top-posting.
Q: What is the most annoying thing in e-mail?
Apart from the screwed up read order (especially when mixed together in a
thread when someone responds using the mandated bottom-posting style), it
also makes it impossible to quote only parts of the original mail.
When you reply to a mail, you let the mail client insert the previous mail
quoted. Then you put the cursor on the first line of the mail and you move
down through the mail, deleting all parts of the quotes that don't add
context for your comments. When you want to add a comment you do so, inline,
right after the quotes that relate to your comment. Then you continue
downwards again.
When most of the quotes have been removed and you've added your own words,
you're done!
2.5 HTML is not for mails
Please switch off those HTML encoded messages. You can mail all those funny
mails to your friends. We speak plain text mails.
2.6 Quoting
Quote as little as possible. Just enough to provide the context you cannot
leave out. A lengthy description can be found here:
http://www.netmeister.org/news/learn2quote.html
2.7 Digest
We allow subscribers to subscribe to the "digest" version of the mailing
lists. A digest is a collection of mails lumped together in one single mail.
Should you decide to reply to a mail sent out as a digest, there are two
things you MUST consider if you really really cannot subscribe normally
instead:
Cut off all mails and chatter that is not related to the mail you want to
reply to.
Change the subject name to something sensible and related to the subject,
preferably even the actual subject of the single mail you wanted to reply to.
2.8 Please Tell Us How You Solved The Problem!
Many people mail questions to the list, people spend some of their time and
make an effort in providing good answers to these questions.
If you are the one who asks, please consider responding once more in case
one of the hints was what solved your problems. The guys who write answers
feel good to know that they provided a good answer and that you fixed the
problem. Far too often, the person who asked the question is never heard from
again, and we never get to know if he/she is gone because the problem was
solved or perhaps because the problem was unsolvable!
Getting the solution posted also helps other users that experience the same
problem(s). They get to see (possibly in the web archives) that the
suggested fixes actually have helped at least one person.

View File

@ -19,7 +19,7 @@ SIMPLE USAGE
curl http://www.weirdserver.com:8000/
Get a directory listing of an FTP site:
curl ftp://cool.haxx.se/
@ -46,7 +46,7 @@ SIMPLE USAGE
Get a file from an SSH server using SCP using a private key to authenticate:
curl -u username: --key ~/.ssh/id_dsa --pubkey ~/.ssh/id_dsa.pub \
scp://shell.example.com/~/personal.txt
Get the main page from an IPv6 web server:
@ -54,7 +54,7 @@ SIMPLE USAGE
DOWNLOAD TO A FILE
Get a web page and store in a local file with a specific name:
curl -o thatpage.html http://www.netscape.com/
@ -113,10 +113,9 @@ USING PASSWORDS
ones out of the ones that the server accepts for the given URL, by using
--anyauth.
NOTE! According to the URL specification, HTTP URLs can not contain a user
and password, so that style will not work when using curl via a proxy, even
though curl allows it at other times. When using a proxy, you _must_ use
the -u style for user and password.
HTTPS
@ -124,17 +123,11 @@ USING PASSWORDS
PROXY
curl supports both HTTP and SOCKS proxy servers, with optional authentication.
It does not have special support for FTP proxy servers since there are no
standards for those, but it can still be made to work with many of them. You
can also use both HTTP and SOCKS proxies to transfer files to and from FTP
servers.
Get an ftp file using an HTTP proxy named my-proxy that uses port 888:
curl -x my-proxy:888 ftp://ftp.leachsite.com/README
Get a file from an HTTP server that requires user and password, using the
same proxy as above:
curl -u user:passwd -x my-proxy:888 http://www.get.this/
@ -143,36 +136,14 @@ PROXY
curl -U user:passwd -x my-proxy:888 http://www.get.this/
A comma-separated list of hosts and domains which do not use the proxy can
be specified as:
curl --noproxy localhost,get.this -x my-proxy:888 http://www.get.this/
If the proxy is specified with --proxy1.0 instead of --proxy or -x, then
curl will use HTTP/1.0 instead of HTTP/1.1 for any CONNECT attempts.
curl also supports SOCKS4 and SOCKS5 proxies with --socks4 and --socks5.
See also the environment variables Curl supports that offer further proxy
control.
Most FTP proxy servers are set up to appear as a normal FTP server from the
client's perspective, with special commands to select the remote FTP server.
curl supports the -u, -Q and --ftp-account options that can be used to
set up transfers through many FTP proxies. For example, a file can be
uploaded to a remote FTP server using a Blue Coat FTP proxy with the
options:
curl -u "Remote-FTP-Username@remote.ftp.server Proxy-Username:Remote-Pass" \
--ftp-account Proxy-Password --upload-file local-file \
ftp://my-ftp.proxy.server:21/remote/upload/path/
See the manual for your FTP proxy to determine the form it expects to set up
transfers, and curl's -v option to see exactly what curl is sending.
RANGES
HTTP 1.1 introduced byte-ranges. Using this, a client can request
to get only one or more subparts of a specified document. Curl supports
this with the -r flag.
@ -189,7 +160,7 @@ RANGES
Get the first 100 bytes of a document using FTP:
curl -r 0-99 ftp://www.get.this/README
UPLOADING
@ -203,9 +174,9 @@ UPLOADING
curl -T uploadfile -u user:passwd ftp://ftp.upload.com/myfile
Upload a local file to the remote site, and use the local file name at the
remote site too:
curl -T uploadfile -u user:passwd ftp://ftp.upload.com/
Upload a local file to get appended to the remote file:
@ -220,14 +191,14 @@ UPLOADING
HTTP
Upload all data on stdin to a specified HTTP site:
curl -T - http://www.upload.com/myfile
Note that the HTTP server must have been configured to accept PUT before
this can be done successfully.
For other ways to do HTTP data upload, see the POST section below.
VERBOSE / DEBUG
@ -244,7 +215,7 @@ VERBOSE / DEBUG
this:
curl --trace trace.txt www.haxx.se
DETAILED INFORMATION
@ -290,7 +261,7 @@ POST (HTTP)
The 'variable' names are the names set with "name=" in the <input> tags, and
the data is the contents you want to fill in for the inputs. The data *must*
be properly URL encoded. That means you replace space with + and that you
replace weird letters with %XX where XX is the hexadecimal representation of
the letter's ASCII code.
Example:
@ -329,7 +300,7 @@ POST (HTTP)
If the content-type is not specified, curl will try to guess from the file
extension (it only knows a few), or use the previously specified type (from
an earlier file if several files are specified in a list) or else it will
use the default type 'application/octet-stream'.
using the default type 'text/plain'.
Emulate a fill-in form with -F. Let's say you fill in three fields in a
form. One field is a file name which to post, one field is your name and one
@ -346,12 +317,12 @@ POST (HTTP)
To send two files in one post you can do it in two ways:
1. Send multiple files in a single "field" with a single field name:
curl -F "pictures=@dog.gif,cat.gif"
2. Send two fields with two field names:
curl -F "pictures=@dog.gif,cat.gif"
2. Send two fields with two field names:
curl -F "docpicture=@dog.gif" -F "catpicture=@cat.gif"
curl -F "docpicture=@dog.gif" -F "catpicture=@cat.gif"
To send a field value literally without interpreting a leading '@'
or '<', or an embedded ';type=', use --form-string instead of
@ -362,8 +333,8 @@ POST (HTTP)
REFERRER
An HTTP request has the option to include information about which address
referred it to the actual page. Curl allows you to specify the
referrer to be used on the command line. It is especially useful to
fool or trick stupid servers or CGI scripts that rely on that information
being available or contain certain data.
@ -374,7 +345,7 @@ REFERRER
USER AGENT
An HTTP request has the option to include information about the browser
that generated the request. Curl allows it to be specified on the command
line. It is especially useful to fool or trick stupid servers or CGI
scripts that only accept certain browsers.
@ -533,7 +504,7 @@ CONFIG FILE
can also specify the long options without the dashes to make it more
readable. You can separate the options and the parameter with spaces, or
with = or :. Comments can be used within the file. If the first letter on a
line is a '#'-symbol the rest of the line is treated as a comment.
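A minimal sketch of what such a config file could look like (the option
values and URL here are purely illustrative):
# this is a comment
user-agent = "my-example-agent/1.0"
connect-timeout = 20
url = "http://www.example.com/"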
If you want the parameter to contain spaces, you must enclose the entire
parameter within double quotes ("). Within those quotes, you specify a
@ -614,21 +585,21 @@ SFTP and SCP and PATH NAMES
FTP and firewalls
The FTP protocol requires one of the involved parties to open a second
connection as soon as data is about to get transferred. There are two ways to
do this.
The default way for curl is to issue the PASV command which causes the
server to open another port and await another connection performed by the
client. This is good if the client is behind a firewall that doesn't allow
incoming connections.
curl ftp.download.com
If the server, for example, is behind a firewall that doesn't allow connections
on ports other than 21 (or if it just doesn't support the PASV command), the
other way to do it is to use the PORT command and instruct the server to
connect to the client on the given IP number and port (as parameters to the
PORT command).
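A minimal example of forcing PORT mode with the hypothetical host above,
letting curl pick the default address (the -P option is discussed in more
detail below):
curl -P - ftp.download.com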
The -P flag to curl supports a few different options. Your machine may have
several IP-addresses and/or network interfaces and curl allows you to select
@ -686,8 +657,8 @@ HTTPS
If you neglect to specify the password on the command line, you will be
prompted for the correct password before any data can be received.
Many older SSL-servers have problems with SSLv3 or TLS, which newer versions
of OpenSSL etc use, therefore it is sometimes useful to specify what
SSL-version curl should use. Use -3, -2 or -1 to specify that exact SSL
version to use (for SSLv3, SSLv2 or TLSv1 respectively):
@ -696,38 +667,29 @@ HTTPS
Otherwise, curl will first attempt to use v3 and then v2.
To use OpenSSL to convert your favourite browser's certificate into a PEM
formatted one that curl can use, do something like this:
In Netscape, you start with hitting the 'Security' menu button.
Select 'certificates->yours' and then pick a certificate in the list
Press the 'Export' button
enter your PIN code for the certs
select a proper place to save it
Run the 'openssl' application to convert the certificate. If you cd to the
openssl installation, you can do it like:
# ./apps/openssl pkcs12 -in [file you saved] -clcerts -out [PEMfile]
In Firefox, select Options, then Advanced, then the Encryption tab,
View Certificates. This opens the Certificate Manager, where you can
Export. Be sure to select PEM for the Save as type.
In Internet Explorer, select Internet Options, then the Content tab, then
Certificates. Then you can Export, and depending on the format you may
need to convert to PEM.
In Chrome, select Settings, then Show Advanced Settings. Under HTTPS/SSL
select Manage Certificates.
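Once converted, the PEM file can be passed to curl with the -E/--cert option;
a sketch with a hypothetical file name, passphrase and site:
curl -E mycert.pem:mypassword https://secure.example.com/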
RESUMING FILE TRANSFERS
To continue a file transfer where it was previously aborted, curl supports
resume on HTTP(S) downloads as well as FTP uploads and downloads.
Continue downloading a document:
@ -741,7 +703,7 @@ RESUMING FILE TRANSFERS
curl -C - -o file http://www.server.com/
(*1) = This requires that the FTP server supports the non-standard command
SIZE. If it doesn't, curl will say so.
(*2) = This requires that the web server supports at least HTTP/1.1. If it
@ -750,7 +712,7 @@ RESUMING FILE TRANSFERS
TIME CONDITIONS
HTTP allows a client to specify a time condition for the document it
requests. It is If-Modified-Since or If-Unmodified-Since. Curl allows you to
specify them with the -z/--time-cond flag.
For example, you can easily make a download that only gets performed if the
@ -798,7 +760,7 @@ LDAP
and offer ldap:// support.
LDAP is a complex thing and writing an LDAP query is not an easy task. I do
advise you to dig up the syntax description for that elsewhere. Two places
that might suit you are:
Netscape's "Netscape Directory SDK 3.0 for C Programmer's Guide Chapter 10:
@ -807,7 +769,7 @@ LDAP
RFC 2255, "The LDAP URL Format" http://curl.haxx.se/rfc/rfc2255.txt
To show you an example, this is how I can get all people from my local LDAP
server that has a certain sub-domain in their email address:
curl -B "ldap://ldap.frontec.se/o=frontec??sub?mail=*sth.frontec.se"
@ -823,7 +785,7 @@ ENVIRONMENT VARIABLES
They should be set for protocol-specific proxies. General proxy should be
set with
ALL_PROXY
A comma-separated list of host names that shouldn't go through any proxy is
@ -831,9 +793,8 @@ ENVIRONMENT VARIABLES
NO_PROXY
If the host name matches one of these strings, or the host is within the
domain of one of these strings, transactions with that node will not be
proxied.
The usage of the -x/--proxy flag overrides the environment variables.
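For example, assuming a Bourne-style shell and the same hypothetical proxy
and host names as above, the variables could be set for a single invocation
like this:
http_proxy=http://my-proxy:888/ NO_PROXY=localhost,get.this curl http://www.get.this/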
@ -841,15 +802,15 @@ ENVIRONMENT VARIABLES
NETRC
Unix introduced the .netrc concept a long time ago. It is a way for a user
to specify name and password for commonly visited FTP sites in a file so
that you don't have to type them in each time you visit those sites. You
realize this is a big security risk if someone else gets hold of your
passwords, so therefore most unix programs won't read this file unless it is
only readable by yourself (curl doesn't care though).
Curl supports .netrc files if told to (using the -n/--netrc and
--netrc-optional options). This is not restricted to just FTP,
so curl can use it for all protocols where authentication is used.
A very simple .netrc file could look something like:
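For instance (the host name and credentials below are placeholders only):
machine ftp.example.com login myname password mysecret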
@ -870,7 +831,7 @@ KERBEROS FTP TRANSFER
Curl supports kerberos4 and kerberos5/GSSAPI for FTP transfers. You need
the kerberos package installed and used at curl build time for it to be
available.
First, get the krb-ticket the normal way, like with the kinit/kauth tool.
Then use curl in a way similar to:
@ -905,7 +866,7 @@ TELNET
- NEW_ENV=<var,val> Sets an environment variable.
NOTE: The telnet protocol does not specify any way to login with a specified
user and password so curl can't do that automatically. To do that, you need
to track when the login prompt is received and send the username and
password accordingly.
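As a sketch, assuming a hypothetical telnet host, telnet options such as the
ones listed above can be passed with repeated -t flags:
curl -t TTYPE=vt100 -t NEW_ENV=FOO,bar telnet://remote.example.com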
@ -924,7 +885,7 @@ PERSISTENT CONNECTIONS
Note that curl cannot use persistent connections for transfers that are used
in subsequent curl invocations. Try to stuff as many URLs as possible on the
same command line if they are using the same host, as that'll make the
transfers faster. If you use an HTTP proxy for file transfers, practically
all transfers will be persistent.
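For example, two documents fetched from the same (hypothetical) host in one
invocation will reuse the connection:
curl http://www.example.com/first.html http://www.example.com/second.html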
MULTIPLE TRANSFERS WITH A SINGLE COMMAND LINE
@ -965,28 +926,6 @@ IPv6
IPv6 addresses provided other than in URLs (e.g. to the --proxy, --interface
or --ftp-port options) should not be URL encoded.
METALINK
Curl supports Metalink (both version 3 and 4 (RFC 5854) are supported), a way
to list multiple URIs and hashes for a file. Curl will make use of the mirrors
listed within for failover if there are errors (such as the file or server not
being available). It will also verify the hash of the file after the download
completes. The Metalink file itself is downloaded and processed in memory and
not stored in the local file system.
Example to use a remote Metalink file:
curl --metalink http://www.example.com/example.metalink
To use a Metalink file in the local file system, use FILE protocol (file://):
curl --metalink file://example.metalink
Please note that if FILE protocol is disabled, there is no way to use a local
Metalink file at the time of this writing. Also note that if --metalink and
--include are used together, --include will be ignored. This is because including
headers in the response will break the Metalink parser, and if the headers are
included in the file described in the Metalink file, the hash check will fail.
MAILING LISTS

View File

@ -1,30 +1,12 @@
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) 1998 - 2012, Daniel Stenberg, <daniel@haxx.se>, et al.
# $Id: Makefile.am,v 1.36 2005/10/28 21:34:51 bagder Exp $
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://curl.haxx.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
###########################################################################
AUTOMAKE_OPTIONS = foreign no-dependencies
man_MANS = curl.1 curl-config.1 mk-ca-bundle.1
GENHTMLPAGES = curl.html curl-config.html mk-ca-bundle.html
PDFPAGES = curl.pdf curl-config.pdf mk-ca-bundle.pdf
man_MANS = curl.1 curl-config.1
GENHTMLPAGES = curl.html curl-config.html
PDFPAGES = curl.pdf curl-config.pdf
HTMLPAGES = $(GENHTMLPAGES) index.html
@ -35,8 +17,7 @@ CLEANFILES = $(GENHTMLPAGES) $(PDFPAGES)
EXTRA_DIST = MANUAL BUGS CONTRIBUTE FAQ FEATURES INTERNALS SSLCERTS \
README.win32 RESOURCES TODO TheArtOfHttpScripting THANKS VERSIONS \
KNOWN_BUGS BINDINGS $(man_MANS) $(HTMLPAGES) HISTORY INSTALL \
$(PDFPAGES) LICENSE-MIXING README.netware DISTRO-DILEMMA INSTALL.devcpp \
MAIL-ETIQUETTE HTTP-COOKIES
$(PDFPAGES) LICENSE-MIXING README.netware DISTRO-DILEMMA INSTALL.devcpp
MAN2HTML= roffit < $< >$@

View File

@ -1,9 +1,8 @@
# Makefile.in generated by automake 1.11.6 from Makefile.am.
# Makefile.in generated by automake 1.9.6 from Makefile.am.
# @configure_input@
# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software
# Foundation, Inc.
# 2003, 2004, 2005 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
@ -15,50 +14,18 @@
@SET_MAKE@
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) 1998 - 2012, Daniel Stenberg, <daniel@haxx.se>, et al.
# $Id: Makefile.am,v 1.36 2005/10/28 21:34:51 bagder Exp $
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://curl.haxx.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
###########################################################################
srcdir = @srcdir@
top_srcdir = @top_srcdir@
VPATH = @srcdir@
am__make_dryrun = \
{ \
am__dry=no; \
case $$MAKEFLAGS in \
*\\[\ \ ]*) \
echo 'am--echo: ; @echo "AM" OK' | $(MAKE) -f - 2>/dev/null \
| grep '^AM OK$$' >/dev/null || am__dry=yes;; \
*) \
for am__flg in $$MAKEFLAGS; do \
case $$am__flg in \
*=*|--*) ;; \
*n*) am__dry=yes; break;; \
esac; \
done;; \
esac; \
test $$am__dry = yes; \
}
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
pkglibexecdir = $(libexecdir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
top_builddir = ..
am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
INSTALL = @INSTALL@
install_sh_DATA = $(install_sh) -c -m 644
install_sh_PROGRAM = $(install_sh) -c
install_sh_SCRIPT = $(install_sh) -c
@ -79,106 +46,35 @@ ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/curl-compilers.m4 \
$(top_srcdir)/m4/curl-confopts.m4 \
$(top_srcdir)/m4/curl-functions.m4 \
$(top_srcdir)/m4/curl-openssl.m4 \
$(top_srcdir)/m4/curl-override.m4 \
$(top_srcdir)/m4/curl-reentrant.m4 $(top_srcdir)/m4/libtool.m4 \
$(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \
$(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \
$(top_srcdir)/m4/xc-cc-check.m4 \
$(top_srcdir)/m4/xc-translit.m4 \
$(top_srcdir)/m4/xc-val-flgs.m4 \
$(top_srcdir)/m4/zz40-xc-ovr.m4 \
$(top_srcdir)/m4/zz50-xc-ovr.m4 \
$(top_srcdir)/m4/zz60-xc-ovr.m4 $(top_srcdir)/acinclude.m4 \
$(top_srcdir)/m4/curl-reentrant.m4 $(top_srcdir)/acinclude.m4 \
$(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs
CONFIG_HEADER = $(top_builddir)/lib/curl_config.h \
CONFIG_HEADER = $(top_builddir)/lib/config.h \
$(top_builddir)/src/config.h \
$(top_builddir)/include/curl/curlbuild.h
CONFIG_CLEAN_FILES =
CONFIG_CLEAN_VPATH_FILES =
depcomp =
am__depfiles_maybe =
SOURCES =
DIST_SOURCES =
RECURSIVE_TARGETS = all-recursive check-recursive dvi-recursive \
html-recursive info-recursive install-data-recursive \
install-dvi-recursive install-exec-recursive \
install-html-recursive install-info-recursive \
install-pdf-recursive install-ps-recursive install-recursive \
installcheck-recursive installdirs-recursive pdf-recursive \
ps-recursive uninstall-recursive
am__can_run_installinfo = \
case $$AM_UPDATE_INFO_DIR in \
n|no|NO) false;; \
*) (install-info --version) >/dev/null 2>&1;; \
esac
am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
am__vpath_adj = case $$p in \
$(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
*) f=$$p;; \
esac;
am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
am__install_max = 40
am__nobase_strip_setup = \
srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
am__nobase_strip = \
for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
am__nobase_list = $(am__nobase_strip_setup); \
for p in $$list; do echo "$$p $$p"; done | \
sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
$(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
if (++n[$$2] == $(am__install_max)) \
{ print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
END { for (dir in files) print dir, files[dir] }'
am__base_list = \
sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
am__uninstall_files_from_dir = { \
test -z "$$files" \
|| { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
|| { echo " ( cd '$$dir' && rm -f" $$files ")"; \
$(am__cd) "$$dir" && rm -f $$files; }; \
}
install-exec-recursive install-info-recursive \
install-recursive installcheck-recursive installdirs-recursive \
pdf-recursive ps-recursive uninstall-info-recursive \
uninstall-recursive
man1dir = $(mandir)/man1
am__installdirs = "$(DESTDIR)$(man1dir)"
MANS = $(man_MANS)
RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive \
distclean-recursive maintainer-clean-recursive
AM_RECURSIVE_TARGETS = $(RECURSIVE_TARGETS:-recursive=) \
$(RECURSIVE_CLEAN_TARGETS:-recursive=) tags TAGS ctags CTAGS \
distdir
ETAGS = etags
CTAGS = ctags
DIST_SUBDIRS = $(SUBDIRS)
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
am__relativize = \
dir0=`pwd`; \
sed_first='s,^\([^/]*\)/.*$$,\1,'; \
sed_rest='s,^[^/]*/*,,'; \
sed_last='s,^.*/\([^/]*\)$$,\1,'; \
sed_butlast='s,/*[^/]*$$,,'; \
while test -n "$$dir1"; do \
first=`echo "$$dir1" | sed -e "$$sed_first"`; \
if test "$$first" != "."; then \
if test "$$first" = ".."; then \
dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
else \
first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
if test "$$first2" = "$$first"; then \
dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
else \
dir2="../$$dir2"; \
fi; \
dir0="$$dir0"/"$$first"; \
fi; \
fi; \
dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
done; \
reldir="$$dir2"
ACLOCAL = @ACLOCAL@
AMDEP_FALSE = @AMDEP_FALSE@
AMDEP_TRUE = @AMDEP_TRUE@
AMTAR = @AMTAR@
AR = @AR@
AS = @AS@
@ -186,91 +82,73 @@ AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
BLANK_AT_MAKETIME = @BLANK_AT_MAKETIME@
CC = @CC@
CCDEPMODE = @CCDEPMODE@
CFLAGS = @CFLAGS@
CFLAG_CURL_SYMBOL_HIDING = @CFLAG_CURL_SYMBOL_HIDING@
CONFIGURE_OPTIONS = @CONFIGURE_OPTIONS@
CPP = @CPP@
CPPFLAGS = @CPPFLAGS@
CPPFLAG_CURL_STATICLIB = @CPPFLAG_CURL_STATICLIB@
CURLVERSION = @CURLVERSION@
CROSSCOMPILING_FALSE = @CROSSCOMPILING_FALSE@
CROSSCOMPILING_TRUE = @CROSSCOMPILING_TRUE@
CURL_CA_BUNDLE = @CURL_CA_BUNDLE@
CURL_CFLAG_EXTRAS = @CURL_CFLAG_EXTRAS@
CURL_DISABLE_DICT = @CURL_DISABLE_DICT@
CURL_DISABLE_FILE = @CURL_DISABLE_FILE@
CURL_DISABLE_FTP = @CURL_DISABLE_FTP@
CURL_DISABLE_GOPHER = @CURL_DISABLE_GOPHER@
CURL_DISABLE_HTTP = @CURL_DISABLE_HTTP@
CURL_DISABLE_IMAP = @CURL_DISABLE_IMAP@
CURL_DISABLE_LDAP = @CURL_DISABLE_LDAP@
CURL_DISABLE_LDAPS = @CURL_DISABLE_LDAPS@
CURL_DISABLE_POP3 = @CURL_DISABLE_POP3@
CURL_DISABLE_PROXY = @CURL_DISABLE_PROXY@
CURL_DISABLE_RTSP = @CURL_DISABLE_RTSP@
CURL_DISABLE_SMTP = @CURL_DISABLE_SMTP@
CURL_DISABLE_TELNET = @CURL_DISABLE_TELNET@
CURL_DISABLE_TFTP = @CURL_DISABLE_TFTP@
CURL_NETWORK_AND_TIME_LIBS = @CURL_NETWORK_AND_TIME_LIBS@
CURL_NETWORK_LIBS = @CURL_NETWORK_LIBS@
CURL_LIBS = @CURL_LIBS@
CYGPATH_W = @CYGPATH_W@
DEFS = @DEFS@
DEPDIR = @DEPDIR@
DLLTOOL = @DLLTOOL@
DSYMUTIL = @DSYMUTIL@
DUMPBIN = @DUMPBIN@
ECHO = @ECHO@
ECHO_C = @ECHO_C@
ECHO_N = @ECHO_N@
ECHO_T = @ECHO_T@
EGREP = @EGREP@
ENABLE_SHARED = @ENABLE_SHARED@
EXEEXT = @EXEEXT@
FGREP = @FGREP@
GREP = @GREP@
HAVE_GNUTLS_SRP = @HAVE_GNUTLS_SRP@
HAVE_LDAP_SSL = @HAVE_LDAP_SSL@
HAVE_ARES = @HAVE_ARES@
HAVE_LIBZ = @HAVE_LIBZ@
HAVE_NSS_INITCONTEXT = @HAVE_NSS_INITCONTEXT@
HAVE_SSLEAY_SRP = @HAVE_SSLEAY_SRP@
HAVE_LIBZ_FALSE = @HAVE_LIBZ_FALSE@
HAVE_LIBZ_TRUE = @HAVE_LIBZ_TRUE@
HAVE_PK11_CREATEGENERICOBJECT = @HAVE_PK11_CREATEGENERICOBJECT@
IDN_ENABLED = @IDN_ENABLED@
INSTALL = @INSTALL@
INSTALL_DATA = @INSTALL_DATA@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
INSTALL_SCRIPT = @INSTALL_SCRIPT@
INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
IPV6_ENABLED = @IPV6_ENABLED@
KRB4_ENABLED = @KRB4_ENABLED@
LD = @LD@
LDFLAGS = @LDFLAGS@
LIBCURL_LIBS = @LIBCURL_LIBS@
LIBMETALINK_CPPFLAGS = @LIBMETALINK_CPPFLAGS@
LIBMETALINK_LDFLAGS = @LIBMETALINK_LDFLAGS@
LIBMETALINK_LIBS = @LIBMETALINK_LIBS@
LIBOBJS = @LIBOBJS@
LIBS = @LIBS@
LIBTOOL = @LIBTOOL@
LIPO = @LIPO@
LN_S = @LN_S@
LTLIBOBJS = @LTLIBOBJS@
MAINT = @MAINT@
MAINTAINER_MODE_FALSE = @MAINTAINER_MODE_FALSE@
MAINTAINER_MODE_TRUE = @MAINTAINER_MODE_TRUE@
MAKEINFO = @MAKEINFO@
MANIFEST_TOOL = @MANIFEST_TOOL@
MANOPT = @MANOPT@
MKDIR_P = @MKDIR_P@
NM = @NM@
MIMPURE_FALSE = @MIMPURE_FALSE@
MIMPURE_TRUE = @MIMPURE_TRUE@
NMEDIT = @NMEDIT@
NO_UNDEFINED_FALSE = @NO_UNDEFINED_FALSE@
NO_UNDEFINED_TRUE = @NO_UNDEFINED_TRUE@
NROFF = @NROFF@
OBJDUMP = @OBJDUMP@
OBJEXT = @OBJEXT@
OTOOL = @OTOOL@
OTOOL64 = @OTOOL64@
PACKAGE = @PACKAGE@
PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_STRING = @PACKAGE_STRING@
PACKAGE_TARNAME = @PACKAGE_TARNAME@
PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
PERL = @PERL@
@ -284,35 +162,27 @@ REQUIRE_LIB_DEPS = @REQUIRE_LIB_DEPS@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
SONAME_BUMP_FALSE = @SONAME_BUMP_FALSE@
SONAME_BUMP_TRUE = @SONAME_BUMP_TRUE@
SSL_ENABLED = @SSL_ENABLED@
STATICLIB_FALSE = @STATICLIB_FALSE@
STATICLIB_TRUE = @STATICLIB_TRUE@
STRIP = @STRIP@
SUPPORT_FEATURES = @SUPPORT_FEATURES@
SUPPORT_PROTOCOLS = @SUPPORT_PROTOCOLS@
USE_ARES = @USE_ARES@
USE_AXTLS = @USE_AXTLS@
USE_CYASSL = @USE_CYASSL@
USE_DARWINSSL = @USE_DARWINSSL@
TEST_SERVER_LIBS = @TEST_SERVER_LIBS@
USE_GNUTLS = @USE_GNUTLS@
USE_GNUTLS_NETTLE = @USE_GNUTLS_NETTLE@
USE_LIBRTMP = @USE_LIBRTMP@
USE_LIBSSH2 = @USE_LIBSSH2@
USE_MANUAL_FALSE = @USE_MANUAL_FALSE@
USE_MANUAL_TRUE = @USE_MANUAL_TRUE@
USE_NSS = @USE_NSS@
USE_OPENLDAP = @USE_OPENLDAP@
USE_POLARSSL = @USE_POLARSSL@
USE_SCHANNEL = @USE_SCHANNEL@
USE_SSLEAY = @USE_SSLEAY@
USE_WINDOWS_SSPI = @USE_WINDOWS_SSPI@
VERSION = @VERSION@
VERSIONED_FLAVOUR = @VERSIONED_FLAVOUR@
VERSIONNUM = @VERSIONNUM@
ZLIB_LIBS = @ZLIB_LIBS@
abs_builddir = @abs_builddir@
abs_srcdir = @abs_srcdir@
abs_top_builddir = @abs_top_builddir@
abs_top_srcdir = @abs_top_srcdir@
ac_ct_AR = @ac_ct_AR@
ac_ct_CC = @ac_ct_CC@
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
am__fastdepCC_FALSE = @am__fastdepCC_FALSE@
am__fastdepCC_TRUE = @am__fastdepCC_TRUE@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
@ -324,7 +194,6 @@ build_alias = @build_alias@
build_cpu = @build_cpu@
build_os = @build_os@
build_vendor = @build_vendor@
builddir = @builddir@
datadir = @datadir@
datarootdir = @datarootdir@
docdir = @docdir@
@ -353,25 +222,20 @@ program_transform_name = @program_transform_name@
psdir = @psdir@
sbindir = @sbindir@
sharedstatedir = @sharedstatedir@
srcdir = @srcdir@
subdirs = @subdirs@
sysconfdir = @sysconfdir@
target_alias = @target_alias@
top_build_prefix = @top_build_prefix@
top_builddir = @top_builddir@
top_srcdir = @top_srcdir@
AUTOMAKE_OPTIONS = foreign no-dependencies
man_MANS = curl.1 curl-config.1 mk-ca-bundle.1
GENHTMLPAGES = curl.html curl-config.html mk-ca-bundle.html
PDFPAGES = curl.pdf curl-config.pdf mk-ca-bundle.pdf
man_MANS = curl.1 curl-config.1
GENHTMLPAGES = curl.html curl-config.html
PDFPAGES = curl.pdf curl-config.pdf
HTMLPAGES = $(GENHTMLPAGES) index.html
SUBDIRS = examples libcurl
CLEANFILES = $(GENHTMLPAGES) $(PDFPAGES)
EXTRA_DIST = MANUAL BUGS CONTRIBUTE FAQ FEATURES INTERNALS SSLCERTS \
README.win32 RESOURCES TODO TheArtOfHttpScripting THANKS VERSIONS \
KNOWN_BUGS BINDINGS $(man_MANS) $(HTMLPAGES) HISTORY INSTALL \
$(PDFPAGES) LICENSE-MIXING README.netware DISTRO-DILEMMA INSTALL.devcpp \
MAIL-ETIQUETTE HTTP-COOKIES
$(PDFPAGES) LICENSE-MIXING README.netware DISTRO-DILEMMA INSTALL.devcpp
MAN2HTML = roffit < $< >$@
SUFFIXES = .1 .html .pdf
@ -383,14 +247,14 @@ $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(am__confi
@for dep in $?; do \
case '$(am__configure_deps)' in \
*$$dep*) \
( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
&& { if test -f $@; then exit 0; else break; fi; }; \
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh \
&& exit 0; \
exit 1;; \
esac; \
done; \
echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign docs/Makefile'; \
$(am__cd) $(top_srcdir) && \
$(AUTOMAKE) --foreign docs/Makefile
echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign docs/Makefile'; \
cd $(top_srcdir) && \
$(AUTOMAKE) --foreign docs/Makefile
.PRECIOUS: Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
@case '$?' in \
@ -408,56 +272,61 @@ $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(am__aclocal_m4_deps):
mostlyclean-libtool:
-rm -f *.lo
clean-libtool:
-rm -rf .libs _libs
install-man1: $(man_MANS)
@$(NORMAL_INSTALL)
@list1=''; \
list2='$(man_MANS)'; \
test -n "$(man1dir)" \
&& test -n "`echo $$list1$$list2`" \
|| exit 0; \
echo " $(MKDIR_P) '$(DESTDIR)$(man1dir)'"; \
$(MKDIR_P) "$(DESTDIR)$(man1dir)" || exit 1; \
{ for i in $$list1; do echo "$$i"; done; \
if test -n "$$list2"; then \
for i in $$list2; do echo "$$i"; done \
| sed -n '/\.1[a-z]*$$/p'; \
fi; \
} | while read p; do \
if test -f $$p; then d=; else d="$(srcdir)/"; fi; \
echo "$$d$$p"; echo "$$p"; \
done | \
sed -e 'n;s,.*/,,;p;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \
-e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,' | \
sed 'N;N;s,\n, ,g' | { \
list=; while read file base inst; do \
if test "$$base" = "$$inst"; then list="$$list $$file"; else \
echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \
$(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst" || exit $$?; \
fi; \
done; \
for i in $$list; do echo "$$i"; done | $(am__base_list) | \
while read files; do \
test -z "$$files" || { \
echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(man1dir)'"; \
$(INSTALL_DATA) $$files "$(DESTDIR)$(man1dir)" || exit $$?; }; \
done; }
distclean-libtool:
-rm -f libtool
uninstall-info-am:
install-man1: $(man1_MANS) $(man_MANS)
@$(NORMAL_INSTALL)
test -z "$(man1dir)" || $(mkdir_p) "$(DESTDIR)$(man1dir)"
@list='$(man1_MANS) $(dist_man1_MANS) $(nodist_man1_MANS)'; \
l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \
for i in $$l2; do \
case "$$i" in \
*.1*) list="$$list $$i" ;; \
esac; \
done; \
for i in $$list; do \
if test -f $(srcdir)/$$i; then file=$(srcdir)/$$i; \
else file=$$i; fi; \
ext=`echo $$i | sed -e 's/^.*\\.//'`; \
case "$$ext" in \
1*) ;; \
*) ext='1' ;; \
esac; \
inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \
inst=`echo $$inst | sed -e 's/^.*\///'`; \
inst=`echo $$inst | sed '$(transform)'`.$$ext; \
echo " $(INSTALL_DATA) '$$file' '$(DESTDIR)$(man1dir)/$$inst'"; \
$(INSTALL_DATA) "$$file" "$(DESTDIR)$(man1dir)/$$inst"; \
done
uninstall-man1:
@$(NORMAL_UNINSTALL)
@list=''; test -n "$(man1dir)" || exit 0; \
files=`{ for i in $$list; do echo "$$i"; done; \
l2='$(man_MANS)'; for i in $$l2; do echo "$$i"; done | \
sed -n '/\.1[a-z]*$$/p'; \
} | sed -e 's,.*/,,;h;s,.*\.,,;s,^[^1][0-9a-z]*$$,1,;x' \
-e 's,\.[0-9a-z]*$$,,;$(transform);G;s,\n,.,'`; \
dir='$(DESTDIR)$(man1dir)'; $(am__uninstall_files_from_dir)
@list='$(man1_MANS) $(dist_man1_MANS) $(nodist_man1_MANS)'; \
l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \
for i in $$l2; do \
case "$$i" in \
*.1*) list="$$list $$i" ;; \
esac; \
done; \
for i in $$list; do \
ext=`echo $$i | sed -e 's/^.*\\.//'`; \
case "$$ext" in \
1*) ;; \
*) ext='1' ;; \
esac; \
inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \
inst=`echo $$inst | sed -e 's/^.*\///'`; \
inst=`echo $$inst | sed '$(transform)'`.$$ext; \
echo " rm -f '$(DESTDIR)$(man1dir)/$$inst'"; \
rm -f "$(DESTDIR)$(man1dir)/$$inst"; \
done
# This directory's subdirectories are mostly independent; you can cd
# into them and run `make' without going through this Makefile.
@ -466,7 +335,7 @@ uninstall-man1:
# (which will cause the Makefiles to be regenerated when you run `make');
# (2) otherwise, pass the desired values on the `make' command line.
$(RECURSIVE_TARGETS):
@fail= failcom='exit 1'; \
@failcom='exit 1'; \
for f in x $$MAKEFLAGS; do \
case $$f in \
*=* | --[!k]*);; \
@ -483,15 +352,16 @@ $(RECURSIVE_TARGETS):
else \
local_target="$$target"; \
fi; \
($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
(cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
|| eval $$failcom; \
done; \
if test "$$dot_seen" = "no"; then \
$(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
fi; test -z "$$fail"
$(RECURSIVE_CLEAN_TARGETS):
@fail= failcom='exit 1'; \
mostlyclean-recursive clean-recursive distclean-recursive \
maintainer-clean-recursive:
@failcom='exit 1'; \
for f in x $$MAKEFLAGS; do \
case $$f in \
*=* | --[!k]*);; \
@ -517,16 +387,16 @@ $(RECURSIVE_CLEAN_TARGETS):
else \
local_target="$$target"; \
fi; \
($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
(cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
|| eval $$failcom; \
done && test -z "$$fail"
tags-recursive:
list='$(SUBDIRS)'; for subdir in $$list; do \
test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \
test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) tags); \
done
ctags-recursive:
list='$(SUBDIRS)'; for subdir in $$list; do \
test "$$subdir" = . || ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \
test "$$subdir" = . || (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) ctags); \
done
ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
@ -534,14 +404,14 @@ ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
unique=`for i in $$list; do \
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
done | \
$(AWK) '{ files[$$0] = 1; nonempty = 1; } \
END { if (nonempty) { for (i in files) print i; }; }'`; \
$(AWK) ' { files[$$0] = 1; } \
END { for (i in files) print i; }'`; \
mkid -fID $$unique
tags: TAGS
TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
$(TAGS_FILES) $(LISP)
set x; \
tags=; \
here=`pwd`; \
if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
include_option=--etags-include; \
@ -553,111 +423,81 @@ TAGS: tags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
list='$(SUBDIRS)'; for subdir in $$list; do \
if test "$$subdir" = .; then :; else \
test ! -f $$subdir/TAGS || \
set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
tags="$$tags $$include_option=$$here/$$subdir/TAGS"; \
fi; \
done; \
list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
unique=`for i in $$list; do \
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
done | \
$(AWK) '{ files[$$0] = 1; nonempty = 1; } \
END { if (nonempty) { for (i in files) print i; }; }'`; \
shift; \
if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
$(AWK) ' { files[$$0] = 1; } \
END { for (i in files) print i; }'`; \
if test -z "$(ETAGS_ARGS)$$tags$$unique"; then :; else \
test -n "$$unique" || unique=$$empty_fix; \
if test $$# -gt 0; then \
$(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
"$$@" $$unique; \
else \
$(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
$$unique; \
fi; \
$(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
$$tags $$unique; \
fi
ctags: CTAGS
CTAGS: ctags-recursive $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
$(TAGS_FILES) $(LISP)
tags=; \
here=`pwd`; \
list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
unique=`for i in $$list; do \
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
done | \
$(AWK) '{ files[$$0] = 1; nonempty = 1; } \
END { if (nonempty) { for (i in files) print i; }; }'`; \
test -z "$(CTAGS_ARGS)$$unique" \
$(AWK) ' { files[$$0] = 1; } \
END { for (i in files) print i; }'`; \
test -z "$(CTAGS_ARGS)$$tags$$unique" \
|| $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
$$unique
$$tags $$unique
GTAGS:
here=`$(am__cd) $(top_builddir) && pwd` \
&& $(am__cd) $(top_srcdir) \
&& gtags -i $(GTAGS_ARGS) "$$here"
&& cd $(top_srcdir) \
&& gtags -i $(GTAGS_ARGS) $$here
distclean-tags:
-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
distdir: $(DISTFILES)
@list='$(MANS)'; if test -n "$$list"; then \
list=`for p in $$list; do \
if test -f $$p; then d=; else d="$(srcdir)/"; fi; \
if test -f "$$d$$p"; then echo "$$d$$p"; else :; fi; done`; \
if test -n "$$list" && \
grep 'ab help2man is required to generate this page' $$list >/dev/null; then \
echo "error: found man pages containing the \`missing help2man' replacement text:" >&2; \
grep -l 'ab help2man is required to generate this page' $$list | sed 's/^/ /' >&2; \
echo " to fix them, install help2man, remove and regenerate the man pages;" >&2; \
echo " typically \`make maintainer-clean' will remove them" >&2; \
exit 1; \
else :; fi; \
else :; fi
@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
list='$(DISTFILES)'; \
dist_files=`for file in $$list; do echo $$file; done | \
sed -e "s|^$$srcdirstrip/||;t" \
-e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
case $$dist_files in \
*/*) $(MKDIR_P) `echo "$$dist_files" | \
sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
sort -u` ;; \
esac; \
for file in $$dist_files; do \
@srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \
list='$(DISTFILES)'; for file in $$list; do \
case $$file in \
$(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \
$(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \
esac; \
if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
if test -d $$d/$$file; then \
dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
if test -d "$(distdir)/$$file"; then \
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
fi; \
if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
fi; \
cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \
if test "$$dir" != "$$file" && test "$$dir" != "."; then \
dir="/$$dir"; \
$(mkdir_p) "$(distdir)$$dir"; \
else \
test -f "$(distdir)/$$file" \
|| cp -p $$d/$$file "$(distdir)/$$file" \
dir=''; \
fi; \
if test -d $$d/$$file; then \
if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
fi; \
cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
else \
test -f $(distdir)/$$file \
|| cp -p $$d/$$file $(distdir)/$$file \
|| exit 1; \
fi; \
done
@list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
if test "$$subdir" = .; then :; else \
$(am__make_dryrun) \
|| test -d "$(distdir)/$$subdir" \
|| $(MKDIR_P) "$(distdir)/$$subdir" \
|| exit 1; \
dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
$(am__relativize); \
new_distdir=$$reldir; \
dir1=$$subdir; dir2="$(top_distdir)"; \
$(am__relativize); \
new_top_distdir=$$reldir; \
echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
echo " am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
($(am__cd) $$subdir && \
test -d "$(distdir)/$$subdir" \
|| $(mkdir_p) "$(distdir)/$$subdir" \
|| exit 1; \
distdir=`$(am__cd) $(distdir) && pwd`; \
top_distdir=`$(am__cd) $(top_distdir) && pwd`; \
(cd $$subdir && \
$(MAKE) $(AM_MAKEFLAGS) \
top_distdir="$$new_top_distdir" \
distdir="$$new_distdir" \
am__remove_distdir=: \
am__skip_length_check=: \
am__skip_mode_fix=: \
top_distdir="$$top_distdir" \
distdir="$$distdir/$$subdir" \
distdir) \
|| exit 1; \
fi; \
@ -668,7 +508,7 @@ all-am: Makefile $(MANS)
installdirs: installdirs-recursive
installdirs-am:
for dir in "$(DESTDIR)$(man1dir)"; do \
test -z "$$dir" || $(MKDIR_P) "$$dir"; \
test -z "$$dir" || $(mkdir_p) "$$dir"; \
done
install: install-recursive
install-exec: install-exec-recursive
@ -680,15 +520,10 @@ install-am: all-am
installcheck: installcheck-recursive
install-strip:
if test -z '$(STRIP)'; then \
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
install; \
else \
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
"INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
fi
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
`test -z '$(STRIP)' || \
echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
mostlyclean-generic:
clean-generic:
@ -696,7 +531,6 @@ clean-generic:
distclean-generic:
-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
maintainer-clean-generic:
@echo "This command is intended for maintainers to use"
@ -707,44 +541,25 @@ clean-am: clean-generic clean-libtool mostlyclean-am
distclean: distclean-recursive
-rm -f Makefile
distclean-am: clean-am distclean-generic distclean-tags
distclean-am: clean-am distclean-generic distclean-libtool \
distclean-tags
dvi: dvi-recursive
dvi-am:
html-am:
info: info-recursive
info-am:
install-data-am: install-man
install-dvi: install-dvi-recursive
install-dvi-am:
install-exec-am:
install-html: install-html-recursive
install-html-am:
install-info: install-info-recursive
install-info-am:
install-man: install-man1
install-pdf: install-pdf-recursive
install-pdf-am:
install-ps: install-ps-recursive
install-ps-am:
installcheck-am:
maintainer-clean: maintainer-clean-recursive
@ -761,27 +576,25 @@ ps: ps-recursive
ps-am:
uninstall-am: uninstall-man
uninstall-am: uninstall-info-am uninstall-man
uninstall-info: uninstall-info-recursive
uninstall-man: uninstall-man1
.MAKE: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) ctags-recursive \
install-am install-strip tags-recursive
.PHONY: $(RECURSIVE_CLEAN_TARGETS) $(RECURSIVE_TARGETS) CTAGS GTAGS \
all all-am check check-am clean clean-generic clean-libtool \
ctags ctags-recursive distclean distclean-generic \
distclean-libtool distclean-tags distdir dvi dvi-am html \
.PHONY: $(RECURSIVE_TARGETS) CTAGS GTAGS all all-am check check-am \
clean clean-generic clean-libtool clean-recursive ctags \
ctags-recursive distclean distclean-generic distclean-libtool \
distclean-recursive distclean-tags distdir dvi dvi-am html \
html-am info info-am install install-am install-data \
install-data-am install-dvi install-dvi-am install-exec \
install-exec-am install-html install-html-am install-info \
install-info-am install-man install-man1 install-pdf \
install-pdf-am install-ps install-ps-am install-strip \
install-data-am install-exec install-exec-am install-info \
install-info-am install-man install-man1 install-strip \
installcheck installcheck-am installdirs installdirs-am \
maintainer-clean maintainer-clean-generic mostlyclean \
mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
tags tags-recursive uninstall uninstall-am uninstall-man \
uninstall-man1
maintainer-clean maintainer-clean-generic \
maintainer-clean-recursive mostlyclean mostlyclean-generic \
mostlyclean-libtool mostlyclean-recursive pdf pdf-am ps ps-am \
tags tags-recursive uninstall uninstall-am uninstall-info-am \
uninstall-man uninstall-man1
html: $(HTMLPAGES)
@ -799,7 +612,6 @@ pdf: $(PDFPAGES)
ps2pdf $$foo.ps $@; \
rm $$foo.ps; \
echo "converted $< to $@")
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:


@ -1,7 +1,7 @@
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
| (__| |_| | _ <| |___
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
| (__| |_| | _ <| |___
\___|\___/|_| \_\_____|
README.netware
@ -10,9 +10,9 @@ README.netware
Curl has been successfully compiled with gcc / nlmconv on different flavours
of Linux as well as with the official Metrowerks CodeWarrior compiler.
While not being the main development target, a continously growing share of
While not being the main development target, a continously growing share of
curl users are NetWare-based, specially also consuming the lib from PHP.
The unix-style man pages are tricky to read on windows, so therefore are all
those pages converted to HTML as well as pdf, and included in the release
archives.
@ -24,4 +24,4 @@ README.netware
Read the INSTALL file for instructions how to compile curl self.


@ -1,7 +1,7 @@
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
| (__| |_| | _ <| |___
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
| (__| |_| | _ <| |___
\___|\___/|_| \_\_____|
README.win32


@ -1,7 +1,7 @@
_ _ ____ _
Project ___| | | | _ \| |
/ __| | | | |_) | |
| (__| |_| | _ <| |___
_ _ ____ _
Project ___| | | | _ \| |
/ __| | | | |_) | |
| (__| |_| | _ <| |___
\___|\___/|_| \_\_____|
@ -32,16 +32,12 @@ This document lists documents and standards used by curl.
RFC 2068 - HTTP 1.1 (obsoleted by RFC 2616)
RFC 2104 - Keyed-Hashing for Message Authentication
RFC 2109 - HTTP State Management Mechanism (cookie stuff)
- Also, read Netscape's specification at
http://curl.haxx.se/rfc/cookie_spec.html
RFC 2183 - The Content-Disposition Header Field
RFC 2195 - CRAM-MD5 authentication
RFC 2229 - A Dictionary Server Protocol
RFC 2255 - Newer LDAP URL syntax document.
@ -50,7 +46,7 @@ This document lists documents and standards used by curl.
Character Sets, Languages, and Continuations
RFC 2388 - "Returning Values from Forms: multipart/form-data"
Use this as an addition to the RFC1867
Use this as an addition to the RFC1867
RFC 2396 - "Uniform Resource Identifiers: Generic Syntax and Semantics" This
one obsoletes RFC 1738, but since RFC 1738 is often mentioned
@ -65,19 +61,12 @@ This document lists documents and standards used by curl.
RFC 2617 - HTTP Authentication
RFC 2718 - Guidelines for new URL Schemes
RFC 2732 - Format for Literal IPv6 Addresses in URL's
RFC 2818 - HTTP Over TLS (TLS is the successor to SSL)
RFC 2821 - SMTP protocol
RFC 2964 - Use of HTTP State Management
RFC 2965 - HTTP State Management Mechanism. Cookies. Obsoletes RFC2109
RFC 3207 - SMTP over TLS
RFC 4616 - PLAIN authentication
RFC 4954 - SMTP Authentication


@ -78,7 +78,7 @@ server, do one of the following:
5. all directories along %PATH%
5. Get a better/different/newer CA cert bundle! One option is to extract the
one a recent Firefox browser uses by running 'make ca-bundle' in the curl
one a recent Mozilla browser uses by running 'make ca-bundle' in the curl
build tree root, or possibly download a version that was generated this
way for you:
@ -89,28 +89,3 @@ certificate that isn't signed by one of the certificates in the installed CA
cert bundle, will cause SSL to report an error ("certificate verify failed")
during the handshake and SSL will then refuse further communication with that
server.
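As a side note not found in the original text, a minimal libcurl (C) sketch of an application that keeps peer verification enabled and points at a CA cert bundle could look like this; the bundle path is only an illustrative assumption:

  #include <stdio.h>
  #include <curl/curl.h>

  int main(void)
  {
    CURL *curl = curl_easy_init();
    if(curl) {
      curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
      /* peer verification is on by default; set it explicitly for clarity */
      curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 1L);
      /* point at the CA cert bundle to verify the server against */
      curl_easy_setopt(curl, CURLOPT_CAINFO, "/path/to/ca-bundle.crt");
      if(curl_easy_perform(curl) != CURLE_OK)
        fprintf(stderr, "transfer failed, possibly a certificate verify error\n");
      curl_easy_cleanup(curl);
    }
    return 0;
  }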
Peer SSL Certificate Verification with NSS
==========================================
If libcurl is build with NSS support then depending on the OS distribution it
is probably required to take some additional steps to use the system-wide CA
cert db. RedHat ships with an additional module libnsspem.so which enables NSS
to read the OpenSSL PEM CA bundle. With OpenSuSE this lib is missing, and NSS
can only work with its own internal formats. Also NSS got a new database
format:
https://wiki.mozilla.org/NSS_Shared_DB
Starting with version 7.19.7 libcurl will check for the NSS version it runs,
and add automatically the 'sql:' prefix to the certdb directory (either the
hardcoded default /etc/pki/nssdb or the directory configured with SSL_DIR
environment variable) if a version 3.12.0 or later is detected.
To check which certdb format your distribution provides examine the default
certdb location /etc/pki/nssdb; the new certdb format can be identified by
the filenames cert9.db, key4.db, pkcs11.txt; filenames of older versions are
cert8.db, key3.db, modsec.db.
Usually these cert databases are empty; but NSS also has built-in CAs which are
provided through a shared library libnssckbi.so; if you want to use these
built-in CAs then create a symlink to libnssckbi.so in /etc/pki/nssdb:
ln -s /usr/lib[64]/libnssckbi.so /etc/pki/nssdb/libnssckbi.so


@ -4,60 +4,38 @@
If you have contributed but are missing here, please let us know!
Aaron Oneal
Aaron Orenstein
Adam D. Moss
Adam Light
Adam Piggott
Adam Tkac
Adrian Schuur
Adriano Meirelles
Ajit Dhumale
Akos Pasztory
Alan Pinstein
Albert Chin
Albert Chin-A-Young
Albert Choy
Ale Vesely
Alejandro Alvarez
Aleksandar Milivojevic
Alessandro Ghedini
Alessandro Vesely
Alex Bligh
Alex Fishman
Alex Gruz
Alex Neblett
Alex Suykov
Alex Vinnik
Alex aka WindEagle
Alexander Beedie
Alexander Kourakos
Alexander Krasnostavsky
Alexander Lazic
Alexander Zhuravlev
Alexey Borzov
Alexey Pesternikov
Alexey Simak
Alexey Zakhlestin
Alexis Carvalho
Alfred Gebert
Allen Pulsifer
Amol Pattekar
Amr Shahin
Anatoli Tubman
Anders Gustafsson
Andi Jahja
Andre Guibert de Bruet
Andreas Damm
Andreas Faerber
Andreas Farber
Andreas Ntaflos
Andreas Olsson
Andreas Rieke
Andreas Schuldei
Andreas Wurf
Andrei Benea
Andrei Cipu
Andres Garcia
Andrew Benham
Andrew Biggs
@ -66,105 +44,60 @@ Andrew Francis
Andrew Fuller
Andrew Moise
Andrew Wansink
Andrew de los Reyes
Andrés García
Andrés García
Andy Cedilnik
Andy Serpa
Andy Tsouladze
Angus Mackay
Anthony Bryan
Anthony G. Basile
Antoine Calando
Anton Bychkov
Anton Kalmykov
Anton Malov
Anton Yabchinskiy
Arkadiusz Miskiewicz
Armel Asselin
Arnaud Compan
Arnaud Ebalard
Arthur Murray
Arve Knudsen
Ates Goral
Augustus Saunders
Avery Fay
Axel Tillequin
Balint Szilakszi
Bart Whiteley
Bas Mevissen
Ben Darnell
Ben Greear
Ben Madsen
Ben Noordhuis
Ben Van Hof
Ben Winslow
Benbuck Nason
Benjamin Gerard
Benjamin Johnson
Bernard Leak
Bernhard Reutner-Fischer
Bertrand Demiddelaer
Bill Egert
Bill Hoffman
Bjoern Sikora
Bjorn Augustsson
Bjorn Reese
Björn Stenberg
Blaise Potard
Bob Richmond
Björn Stenberg
Bob Schader
Bogdan Nicula
Brad Burdick
Brad Hards
Brad King
Bradford Bruce
Brandon Wang
Brendan Jurd
Brent Beardsley
Brian Akins
Brian Dessent
Brian J. Murrell
Brian R Duffy
Brian Ulm
Brock Noland
Bruce Mitchener
Bryan Henderson
Bryan Kemp
Cameron Kaiser
Camille Moncelier
Caolan McNamara
Carsten Lange
Casey O'Donnell
Cedric Deltheil
Chad Monroe
Chandrakant Bagul
Charles Kerr
Chih-Chung Chang
Chris "Bob Bob"
Chris Combes
Chris Conroy
Chris Deidun
Chris Flerackers
Chris Gaukroger
Chris Maltby
Chris Mumford
Chris Smowton
Christian Grothoff
Christian Hagele
Christian Hägele
Christian Krause
Christian Kurz
Christian Robottom Reis
Christian Schmitz
Christian Vogt
Christophe Demory
Christophe Legry
Christopher Conroy
Christopher Palow
Christopher R. Palmer
Christopher Stone
Ciprian Badescu
Claes Jakobsson
Clarence Gardner
Clifford Wolf
Cody Jones
@ -173,21 +106,14 @@ Colin Watson
Colm Buckley
Constantine Sapuntzakis
Cory Nelson
Craig A West
Craig Davison
Craig Markwardt
Cris Bailiff
Cristian Rodriguez
Cristian Rodríguez
Curt Bogmine
Cyrill Osterwalder
Dag Ekengren
Dagobert Michelsen
Damien Adant
Dan Becker
Dan C
Dan Fandrich
Dan Locks
Dan Nelson
Dan Petitt
Dan Torop
@ -195,23 +121,17 @@ Dan Zitter
Daniel Black
Daniel Cater
Daniel Egger
Daniel Fandrich
Daniel Johnson
Daniel Mentz
Daniel Steinberg
Daniel Stenberg
Daniel Theron
Daniel at touchtunes
Darryl House
Darshan Mody
Dave Dribin
Dave Halbakken
Dave Hamilton
Dave May
Dave Reisner
Dave Vasilevsky
David Bau
David Binderman
David Blaikie
David Byron
David Cohen
David Eriksson
@ -219,7 +139,6 @@ David Houlder
David Hull
David J Meyer
David James
David Kierznowski
David Kimdon
David Lang
David LeBlanc
@ -234,15 +153,12 @@ David Wright
David Yan
Dengminwen
Detlef Schmier
Didier Brisebourg
Diego Casorran
Dima Barsky
Dimitre Dimitrov
Dimitris Sarris
Dinar
Dirk Eddelbuettel
Dirk Manske
Dmitri Shubin
Dmitriy Sergeyev
Dmitry Bartsevich
Dmitry Kurochkin
@ -251,13 +167,10 @@ Dmitry Rechkin
Dolbneff A.V
Domenico Andreoli
Dominick Meglio
Dominique Leuenberger
Doug Kaufman
Doug Porter
Douglas E. Wegscheid
Douglas Kilpatrick
Douglas R. Horner
Douglas Steinwand
Dov Murik
Duane Cathey
Duncan Mac-Vicar Prett
@ -265,11 +178,8 @@ Dustin Boswell
Dylan Ellicott
Dylan Salisbury
Early Ehlinger
Ebenezer Ikonne
Edin Kadribasic
Eduard Bloch
Edward Sheldrake
Eelco Dolstra
Eetu Ojanen
Ellis Pritchard
Emanuele Bovisio
@ -278,15 +188,12 @@ Emiliano Ida
Enrico Scholz
Enrik Berkhan
Eric Cooper
Eric Hu
Eric Landes
Eric Lavigne
Eric Melville
Eric Mertens
Eric Rautman
Eric Thelin
Eric Vergnaud
Eric Wong
Eric Young
Erick Nuwendam
Erwan Legrand
@ -294,8 +201,6 @@ Erwin Authried
Eugene Kotlyarov
Evan Jordan
Eygene Ryabinkin
Fabian Hiernaux
Fabian Keil
Fabrizio Ammollo
Fedor Karpelevitch
Felix von Leitner
@ -304,18 +209,10 @@ Florian Schoppmann
Forrest Cahoon
Frank Hempel
Frank Keeney
Frank McGeough
Frank Meier
Frank Ticheler
Frank Van Uffelen
František Kučera
Fred Machado
Fred New
Fred Noz
Frederic Lepied
Gabriel Kuri
Gabriel Sjoberg
Garrett Holmstrom
Gary Maxwell
Gautam Kachroo
Gautam Mani
@ -327,11 +224,9 @@ Georg Lippitsch
Georg Wicherski
Gerd v. Egidy
Gerhard Herre
Gerrit Bruchhäuser
Ghennadi Procopciuc
Gerrit Bruchhäuser
Giancarlo Formicuccia
Giaslas Georgios
Gil Weber
Gilad
Gilbert Ramirez Jr.
Gilles Blanc
@ -340,20 +235,17 @@ Giuseppe Attardi
Giuseppe D'Ambrosio
Glen Nakamura
Glen Scott
Gokhan Sengun
Grant Erickson
Greg Hewgill
Greg Morse
Greg Onufer
Greg Zavertnik
Grigory Entin
Guenole Bescon
Guenter Knauf
Guido Berhoerster
Guillaume Arluison
Gustaf Hui
Gwenole Beauchesne
Götz Babin-Ebell
Götz Babin-Ebell
Günter Knauf
Hamish Mackenzie
Hang Kin Lau
Hanno Kranzhoff
@ -361,20 +253,9 @@ Hans Steegers
Hans-Jurgen May
Hardeep Singh
Harshal Pradhan
Hauke Duden
Heikki Korpela
Heinrich Ko
Hendrik Visage
Henrik Storner
Henry Ludemann
Herve Amblard
Hidemoto Nakada
Ho-chi Chen
Hoi-Ho Chan
Hongli Lai
Howard Chu
Hzhijun
Ian D Allen
Ian Ford
Ian Gulliver
Ian Lynagh
@ -390,13 +271,9 @@ Immanuel Gregoire
Ingmar Runge
Ingo Ralf Blum
Ingo Wilken
Jack Zhang
Jacky Lam
Jacob Meuser
Jacob Moshenko
Jad Chamcham
James Bursa
James Cheng
James Clancy
James Cone
James Gallagher
@ -406,16 +283,9 @@ James MacMillan
Jamie Lokier
Jamie Newton
Jamie Wilkinson
Jan Ehrhardt
Jan Koen Annot
Jan Kunder
Jan Schaumann
Jan Van Boghout
Jared Lundell
Jari Sundell
Jason Glasgow
Jason Liu
Jason McDonald
Jason S. Priebe
Jay Austin
Jayesh A Shah
@ -426,7 +296,6 @@ Jean-Francois Bertrand
Jean-Louis Lemaire
Jean-Marc Ranger
Jean-Philippe Barrette-LaPierre
Jeff Connelly
Jeff Johnson
Jeff Lawson
Jeff Phillips
@ -435,79 +304,47 @@ Jeff Weber
Jeffrey Pohlmeyer
Jeremy Friesner
Jerome Muffat-Meridol
Jerome Vouillon
Jerry Wu
Jes Badwal
Jesper Jensen
Jesse Noller
Jie He
Jim Drash
Jim Freeman
Jim Hollinger
Jim Meyering
Jocelyn Jaubert
Joe Halpin
Joe Malicki
Joe Mason
Joel Chen
Jofell Gallardo
Johan Anderson
Johan Nilsson
Johan van Selst
Johannes Bauer
John Bradshaw
John Crow
John Dennis
John E. Malmberg
John Janssen
John Joseph Bachir
John Kelly
John Lask
John Lightsey
John Marino
John McGowan
John P. McCaskey
John Suprock
John Wilkinson
John-Mark Bell
Johnny Luong
Jon Grubbs
Jon Nelson
Jon Sargeant
Jon Travis
Jon Turner
Jonas Forsman
Jonas Schnelli
Jonatan Lander
Jonathan Hseu
Jonathan Nieder
Jongki Suwandi
Jose Kahan
Josef Wolf
Josh Kapell
Joshua Kwan
Josue Andrade Gomes
Juan Barreto
Juan F. Codagnone
Juan Ignacio Hervás
Juan Ignacio Hervás
Judson Bishop
Juergen Wilke
Jukka Pihl
Julian Noble
Julian Taylor
Julien Chaffraix
Julien Royer
Jun-ichiro itojun Hagino
Jurij Smakov
Justin Fletcher
Jörg Mueller-Tolk
Jörn Hartroth
Jörg Mueller-Tolk
Jörn Hartroth
Kai Sommerfeld
Kai-Uwe Rommel
Kalle Vahlman
Kamil Dudka
Kang-Jin Lee
Karl M
Karl Moerder
Karol Pietrzak
Kaspar Brand
@ -518,10 +355,7 @@ Keith McGuigan
Keith Mok
Ken Hirsch
Ken Rastatter
Kenny To
Kent Boortz
Keshav Krity
Kevin Baughman
Kevin Fisk
Kevin Lussier
Kevin Reed
@ -533,133 +367,87 @@ Kjetil Jacobsen
Klevtsov Vadim
Kris Kennaway
Krishnendu Majumdar
Krister Johansen
Kristian Gunstone
Kristian Köhntopp
Kristian Köhntopp
Kyle Sallee
Lachlan O'Dea
Larry Campbell
Larry Fahnoe
Lars Buitinck
Lars Gustafsson
Lars J. Aas
Lars Nilsson
Lars Torben Wilson
Lau Hang Kin
Laurent Rabret
Legoff Vincent
Lehel Bernadt
Len Krause
Lenaic Lefever
Lenny Rachitsky
Liam Healy
Lijo Antony
Linas Vepstas
Ling Thio
Linus Nielsen Feltzing
Lisa Xu
Liza Alenchery
Loic Dachary
Loren Kirkby
Luca Altea
Luca Alteas
Lucas Adamski
Lukasz Czekierda
Luke Amery
Luke Call
Luong Dinh Dung
Maciej Karpiuk
Maciej W. Rozycki
Mamoru Tasaka
Mandy Wu
Manfred Schwarb
Manuel Massing
Marc Boucher
Marc Hoersken
Marc Kleine-Budde
Marcel Raad
Marcel Roelofs
Marcelo Juchem
Marcin Adamski
Marcin Konicki
Marco G. Salvagno
Marco Maggi
Marcus Sundberg
Marcus Webster
Mario Schroeder
Mark Brand
Mark Butler
Mark Davies
Mark Eichin
Mark Incley
Mark Karpeles
Mark Lentczner
Mark Salisbury
Mark Snelling
Mark Tully
Markus Duft
Markus Koetter
Markus Moeller
Markus Oberhumer
Martijn Koster
Martin C. Martin
Martin Drasar
Martin Hager
Martin Hedenfalk
Martin Lemke
Martin Skinner
Martin Storsjo
Marty Kuhrt
Maruko
Massimiliano Ziccardi
Massimo Callegari
Mateusz Loskot
Mathias Axelsson
Mats Lidell
Matt Kraai
Matt Veenstra
Matt Witherspoon
Matt Wixson
Matteo Rocco
Matthew Blain
Matthew Clarke
Matthias Bolte
Maurice Barnum
Mauro Iorio
Max Katsev
Maxim Ivanov
Maxim Perenesenko
Maxim Prohorov
Maxime Larocque
Mehmet Bozkurt
Mekonikum
Mettgut Jamalla
Michael Benedict
Michael Calmer
Michael Cronenworth
Michael Curtis
Michael Day
Michael Goffioul
Michael Jahn
Michael Jerris
Michael Mealling
Michael Mueller
Michael Smith
Michael Stillwell
Michael Wallner
Michal Bonino
Michal Gorny
Michal Kowalczyk
Michal Marek
Michele Bini
Mihai Ionescu
Mikael Johansson
Mikael Sennerholm
Mike Bytnar
Mike Crowe
Mike Dobbs
Mike Hommey
Mike Power
Mike Protts
Mike Revi
Miklos Nemeth
@ -670,72 +458,49 @@ Moonesamy
Nathan Coulter
Nathan O'Sullivan
Nathanael Nerode
Naveen Chandran
Naveen Noel
Neil Bowers
Neil Dunbar
Neil Spring
Nic Roets
Nicholas Maniscalco
Nick Gimbrone
Nick Humfrey
Nick Zitzmann
Nico Baggus
Nicolas Berloquin
Nicolas Croiset
Nicolas François
Nicolas François
Niels van Tongeren
Nikita Schmidt
Nikitinskit Dmitriy
Niklas Angebrand
Nikolai Kondrashov
Nikos Mavrogiannopoulos
Ning Dong
Nir Soffer
Nis Jorgensen
Nodak Sodak
Norbert Frese
Norbert Novotny
Ofer
Olaf Flebbe
Olaf Stueben
Olaf Stüben
Olivier Berger
Olaf Stüben
Oren Tirosh
Ori Avtalion
Oscar Koeroo
Oscar Norlander
P R Schaffner
Paolo Piacentini
Pascal Terjan
Pasha Kuznetsov
Pat Ray
Patrice Guerin
Patrick Bihan-Faou
Patrick Monnerat
Patrick Scott
Patrick Smith
Patrik Thunstrom
Pau Garcia i Quiles
Paul Harrington
Paul Howarth
Paul Marquis
Paul Moore
Paul Nolan
Paul Querna
Pavel Cenek
Pavel Orehov
Pavel Raiskup
Pawel A. Gajda
Pawel Kierski
Pedro Larroy
Pedro Neves
Pete Su
Peter Bray
Peter Forret
Peter Heuchert
Peter Hjalmarsson
Peter Korsgaard
Peter Lamberg
Peter O'Gorman
Peter Pentchev
@ -748,46 +513,31 @@ Peter Wullinger
Peteris Krumins
Phil Blundell
Phil Karn
Phil Lisiecki
Phil Pellouchoud
Philip Craig
Philip Gladstone
Philip Langdale
Philippe Hameau
Philippe Raoult
Philippe Vaucher
Pierre
Pierre Brico
Pierre Chapuis
Pierre Joye
Pierre Ynard
Pooyan McSporran
Pramod Sharma
Puneet Pawaia
Quagmire
Quanah Gibson-Mount
Quinn Slack
Rafa Muyo
Rafael Sagula
Rainer Canavan
Rainer Koenig
Rajesh Naganathan
Ralf S. Engelschall
Ralph Beckmann
Ralph Mitchell
Ramana Mokkapati
Randy McMurchy
Ravi Pratap
Ray Dassen
Ray Pekowski
Reinout van Schouwen
Renato Botelho
Renaud Chaillat
Renaud Duhaut
Rene Bernhardt
Rene Rebe
Reuven Wachtfogel
Reza Arbab
Ricardo Cadime
Rich Gray
Rich Rauenzahn
@ -798,75 +548,53 @@ Richard Clayton
Richard Cooper
Richard Gorton
Richard Prescott
Richard Silverman
Rick Jones
Rick Richardson
Rob Crittenden
Rob Jones
Rob Stanzel
Rob Ward
Robert A. Monat
Robert B. Harris
Robert D. Young
Robert Foreman
Robert Iakobashvili
Robert Olson
Robert Schumann
Robert Weaver
Robin Cornelius
Robin Johnson
Robin Kay
Robson Braga Araujo
Rodney Simmons
Rodrigo Silva
Roland Blom
Roland Krikava
Roland Zimmermann
Rolland Dudemaine
Roman Koifman
Roman Mamedov
Ron Zapp
Rosimildo da Silva
Roy Shan
Rune Kleveland
Ruslan Gazizov
Rutger Hofman
Ryan Chan
Ryan Nelson
Ryan Schmidt
S. Moonesamy
Salvador Dávila
Salvador Dávila
Salvatore Sorrentino
Sam Listopad
Sampo Kellomaki
Samuel Díaz García
Samuel Díaz García
Samuel Listopad
Samuel Thibault
Sander Gates
Sandor Feldi
Santhana Todatry
Saqib Ali
Sara Golemon
Saul good
Scott Bailey
Scott Barrett
Scott Cantor
Scott Davis
Scott McCreary
Sebastian Rasmussen
Sebastien Willemijns
Senthil Raja Velu
Sergei Nikulov
Sergio Ballestrero
Seshubabu Pasam
Sh Diao
Sharad Gupta
Shard
Shawn Poulson
Shmulik Regev
Siddhartha Prakash Jain
Sidney San Martin
Siegfried Gyuricsko
Simon Dick
Simon Josefsson
Simon Liu
@ -875,11 +603,8 @@ Sonia Subramanian
Spacen Jasset
Spiridonoff A.V
Stadler Stephan
Stan van de Burgt
Stefan Esser
Stefan Krause
Stefan Teleman
Stefan Tomanek
Stefan Ulrich
Stephan Bergmann
Stephen Collyer
@ -887,8 +612,6 @@ Stephen Kick
Stephen More
Sterling Hughes
Steve Green
Steve H Truong
Steve Holme
Steve Lhomme
Steve Little
Steve Marx
@ -896,68 +619,44 @@ Steve Oliphant
Steve Roskowski
Steven Bazyl
Steven G. Johnson
Steven M. Schweda
Steven Parkes
Stoned Elipot
Sven Anders
Sven Neuhaus
Sven Wegener
Sébastien Willemijns
Sébastien Willemijns
T. Bharath
T. Yamada
Taneli Vahakangas
Tanguy Fautre
Tatsuhiro Tsujikawa
Temprimus
Thomas J. Moore
Thomas Klausner
Thomas L. Shinnick
Thomas Lopatic
Thomas Schwinge
Thomas Tonino
Tim Ansell
Tim Baker
Tim Bartley
Tim Chen
Tim Costello
Tim Harder
Tim Heckman
Tim Newsome
Tim Sneddon
Tinus van den Berg
Tobias Rundström
Tobias Rundström
Toby Peterson
Todd A Ouska
Todd Kulesza
Todd Ouska
Todd Vierling
Tom Benoist
Tom Donovan
Tom Lee
Tom Mattison
Tom Moers
Tom Mueller
Tom Regner
Tom Wright
Tom Zerucha
Tomas Mlcoch
Tomas Pospisek
Tomas Szepe
Tomasz Lacki
Tommie Gannert
Tommy Tam
Ton Voon
Toni Moreno
Toon Verwaest
Tor Arntsen
Torsten Foertsch
Toshio Kuratomi
Toshiyuki Maezawa
Traian Nicolescu
Troels Walsted Hansen
Troy Engel
Tupone Alfredo
Ulf Härnhammar
Ulf Härnhammar
Ulrich Zadow
Venkat Akella
Victor Snezhko
@ -967,29 +666,20 @@ Vincent Bronner
Vincent Le Normand
Vincent Penquerc'h
Vincent Sanders
Vincent Torri
Vlad Grachov
Vlad Ureche
Vladimir Grishchenko
Vladimir Lazarenko
Vojtech Janota
Vojtech Minarik
Vsevolod Novikov
Walter J. Mack
Ward Willats
Wayne Haigh
Werner Koch
Wesley Laxton
Wesley Miaw
Wez Furlong
Wilfredo Sanchez
Wojciech Zwiefka
Wu Yongzheng
Xavier Bouchoux
Yang Tse
Yarram Sunil
Yehoshua Hershberg
Yukihiro Kawada
Yuriy Sosov
Yves Lejeune
Zmey Petroff


@ -1,7 +1,7 @@
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
| (__| |_| | _ <| |___
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
| (__| |_| | _ <| |___
\___|\___/|_| \_\_____|
Things that could be nice to do in the future
@ -12,32 +12,31 @@
All bugs documented in the KNOWN_BUGS document are subject for fixing!
1. libcurl
1.1 Zero-copy interface
1.2 More data sharing
1.3 struct lifreq
1.4 signal-based resolver timeouts
1.5 get rid of PATH_MAX
1.6 progress callback without doubles
1.7 Happy Eyeball dual stack connect
2. libcurl - multi interface
2.1 More non-blocking
2.2 Fix HTTP Pipelining for PUT
2.2 Remove easy interface internally
2.3 Avoid having to remove/readd handles
3. Documentation
3.1 More and better
4. FTP
4.1 HOST
4.1 PRET
4.2 Alter passive/active on failure and retry
4.3 Earlier bad letter detection
4.4 REST for large files
4.5 FTP proxy support
4.6 ASCII support
4.6 PORT port range
4.7 ASCII support
5. HTTP
5.1 Better persistency for HTTP 1.0
5.2 support FF3 sqlite cookie files
5.3 Rearrange request header order
6. TELNET
6.1 ditch stdin
@ -53,72 +52,68 @@
7.5 Export session ids
7.6 Provide callback for cert verification
7.7 Support other SSL libraries
7.8 Support SRP on the TLS layer
7.9 improve configure --with-ssl
7.10 Support DANE
8. GnuTLS
8.1 SSL engine stuff
8.3 check connection
8.1 Make NTLM work without OpenSSL functions
8.2 SSL engine stuff
8.3 SRP
8.4 non-blocking
8.5 check connection
9. SMTP
9.1 Specify the preferred authentication mechanism
9.2 Initial response
9.3 Pipelining
10. POP3
10.1 auth= in URLs
11. LDAP
11.1 SASL based authentication mechanisms
12. Other protocols
9. Other protocols
9.1 ditch ldap-specific select
13. New protocols
13.1 RSYNC
10. New protocols
10.1 RTSP
10.2 RSYNC
10.3 RTMP
14. SASL
14.1 Other authentication mechanisms
15. Client
15.1 sync
15.2 glob posts
15.3 prevent file overwriting
15.4 simultaneous parallel transfers
15.5 provide formpost headers
15.6 url-specific options
15.7 warning when setting an option
15.8 IPv6 addresses with globbing
11. Client
11.1 Content-Disposition
11.2 sync
11.3 glob posts
11.4 prevent file overwriting
11.5 ftp wildcard download
11.6 simultaneous parallel transfers
11.7 provide formpost headers
11.8 url-specific options
11.9 metalink support
11.10 warning when setting an option
16. Build
16.1 roffit
12. Build
12.1 roffit
17. Test suite
17.1 SSL tunnel
17.2 nicer lacking perl message
17.3 more protocols supported
17.4 more platforms supported
13. Test suite
13.1 SSL tunnel
13.2 nicer lacking perl message
13.3 more protocols supported
13.4 more platforms supported
18. Next SONAME bump
18.1 http-style HEAD output for ftp
18.2 combine error codes
18.3 extend CURLOPT_SOCKOPTFUNCTION prototype
14. Next SONAME bump
14.1 http-style HEAD output for ftp
14.2 combine error codes
14.3 extend CURLOPT_SOCKOPTFUNCTION prototype
19. Next major release
19.1 cleanup return codes
19.2 remove obsolete defines
19.3 size_t
19.4 remove several functions
19.5 remove CURLOPT_FAILONERROR
19.6 remove CURLOPT_DNS_USE_GLOBAL_CACHE
19.7 remove progress meter from libcurl
19.8 remove 'curl_httppost' from public
19.9 have form functions use CURL handle argument
19.10 Add CURLOPT_MAIL_CLIENT option
15. Next major release
15.1 cleanup return codes
15.2 remove obsolete defines
15.3 size_t
15.4 remove several functions
15.5 remove CURLOPT_FAILONERROR
15.6 remove CURLOPT_DNS_USE_GLOBAL_CACHE
==============================================================================
1. libcurl
1.1 Zero-copy interface
Introduce another callback interface for upload/download that makes one less
copy of data and thus a faster operation.
[http://curl.haxx.se/dev/no_copy_callbacks.txt]
1.2 More data sharing
curl_share_* functions already exist and work, and they can be extended to
@ -135,7 +130,7 @@
libcurl built without an asynchronous resolver library uses alarm() to time
out DNS lookups. When a timeout occurs, this causes libcurl to jump from the
signal handler back into the library with a sigsetjmp, which effectively
signal handler back into the library with a sigsetjmp, which effectively
causes libcurl to continue running within the signal handler. This is
non-portable and could cause problems on some platforms. A discussion on the
problem is available at http://curl.haxx.se/mail/lib-2008-09/0197.html
@ -143,58 +138,38 @@
Also, alarm() provides timeout resolution only to the nearest second. alarm
ought to be replaced by setitimer on systems that support it.
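To make the concern above concrete, here is a rough standalone C sketch (not from the TODO itself) of the alarm()/sigsetjmp() pattern being described, assuming plain POSIX signals; sleep() stands in for a blocking name resolve:

  #include <setjmp.h>
  #include <signal.h>
  #include <stdio.h>
  #include <unistd.h>

  static sigjmp_buf jmpenv;

  static void alarm_handler(int sig)
  {
    (void)sig;
    siglongjmp(jmpenv, 1);          /* jump back out of the blocking call */
  }

  int main(void)
  {
    signal(SIGALRM, alarm_handler);
    if(sigsetjmp(jmpenv, 1)) {
      puts("resolve timed out");    /* reached via the signal handler */
      return 1;
    }
    alarm(5);                       /* whole seconds only */
    sleep(10);                      /* stand-in for a blocking getaddrinfo() */
    alarm(0);
    puts("resolved in time");
    return 0;
  }

After siglongjmp() the program effectively keeps running from the signal handler's context, which is the portability problem the paragraph points out.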
1.5 get rid of PATH_MAX
Having code use and rely on PATH_MAX is not nice:
http://insanecoding.blogspot.com/2007/11/pathmax-simply-isnt.html
Currently the SSH based code uses it a bit, but to remove PATH_MAX from there
we need libssh2 to properly tell us when we pass in a too small buffer and
its current API (as of libssh2 1.2.7) doesn't.
1.6 progress callback without doubles
The progress callback was introduced way back in the days and the choice to
use doubles in the arguments was possibly good at the time. Today the doubles
only confuse users and make the amounts less precise. We should introduce
another progress callback option that take precedence over the old one and
have both co-exist for a forseeable time until we can remove the double-using
one.
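For reference (this prototype is not quoted in the TODO itself), the double-based callback in question is CURLOPT_PROGRESSFUNCTION and is used roughly like this:

  #include <stdio.h>
  #include <curl/curl.h>

  /* the existing callback receives all byte counters as doubles */
  static int progress_cb(void *clientp, double dltotal, double dlnow,
                         double ultotal, double ulnow)
  {
    (void)clientp; (void)ultotal; (void)ulnow;
    if(dltotal > 0.0)
      fprintf(stderr, "%.0f of %.0f bytes\r", dlnow, dltotal);
    return 0;                       /* returning non-zero aborts the transfer */
  }

  /* after curl_easy_init():
     curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0L);
     curl_easy_setopt(curl, CURLOPT_PROGRESSFUNCTION, progress_cb); */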
1.7 Happy Eyeball dual stack connect
In order to make alternative technologies not suffer when transitioning, like
when introducing IPv6 as an alternative to IPv4 and there are more than one
option existing simultaneously there are reasons to reconsider internal
choices.
To make libcurl do blazing fast IPv6 in a dual-stack configuration, this needs
to be addressed:
http://tools.ietf.org/html/rfc6555
2. libcurl - multi interface
2.1 More non-blocking
Make sure we don't ever loop because of non-blocking sockets returning
EWOULDBLOCK or similar. Blocking cases include:
EWOULDBLOCK or similar. The GnuTLS connection etc.
- Name resolves on non-windows unless c-ares is used
- NSS SSL connections
- HTTP proxy CONNECT operations
- SOCKS proxy handshakes
- file:// transfers
- TELNET transfers
- The "DONE" operation (post transfer protocol-specific actions) for the
protocols SFTP, SMTP, FTP. Fixing Curl_done() for this is a worthy task.
2.2 Remove easy interface internally
2.2 Fix HTTP Pipelining for PUT
Make curl_easy_perform() a wrapper-function that simply creates a multi
handle, adds the easy handle to it, runs curl_multi_perform() until the
transfer is done, then detach the easy handle, destroy the multi handle and
return the easy handle's return code. This will thus make everything
internally use and assume the multi interface. The select()-loop should use
curl_multi_socket().
HTTP Pipelining can be a way to greatly enhance performance for multiple
serial requests and currently libcurl only supports that for HEAD and GET
requests but it should also be possible for PUT.
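A simplified sketch (not part of the TODO) of the wrapper idea described above - making an easy-style transfer run purely on the existing public multi interface - might look like this, with error handling left out:

  #include <sys/select.h>
  #include <curl/curl.h>

  /* drive one easy handle to completion using multi-interface calls only */
  static CURLcode perform_via_multi(CURL *easy)
  {
    CURLM *multi = curl_multi_init();
    CURLcode result = CURLE_OK;
    CURLMsg *msg;
    int still_running = 0;
    int msgs_left;

    curl_multi_add_handle(multi, easy);
    do {
      curl_multi_perform(multi, &still_running);
      if(still_running) {
        fd_set rd, wr, ex;
        int maxfd = -1;
        struct timeval tv = {1, 0};
        FD_ZERO(&rd); FD_ZERO(&wr); FD_ZERO(&ex);
        curl_multi_fdset(multi, &rd, &wr, &ex, &maxfd);
        select(maxfd + 1, &rd, &wr, &ex, &tv);   /* wait for socket activity */
      }
    } while(still_running);

    while((msg = curl_multi_info_read(multi, &msgs_left)))
      if(msg->msg == CURLMSG_DONE)
        result = msg->data.result;               /* the easy handle's code */

    curl_multi_remove_handle(multi, easy);
    curl_multi_cleanup(multi);
    return result;
  }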
2.3 Avoid having to remove/readd handles
curl_multi_handle_control() - this can control the easy handle (while) added
to a multi handle in various ways:
o RESTART, unconditionally restart this easy handle's transfer from the
start, re-init the state
o RESTART_COMPLETED, restart this easy handle's transfer but only if the
existing transfer has already completed and it is in a "finished state".
o STOP, just stop this transfer and consider it completed
o PAUSE?
o RESUME?
3. Documentation
@ -204,12 +179,12 @@
4. FTP
4.1 HOST
4.1 PRET
HOST is a suggested command in the works for a client to tell which host name
to use, to offer FTP servers named-based virtual hosting:
http://tools.ietf.org/html/draft-hethmon-mcmurray-ftp-hosts-11
PRET is a command that primarily "drftpd" supports, which could be useful
when using libcurl against such a server. It is a non-standard and a rather
oddly designed command, but...
http://curl.haxx.se/bug/feature.cgi?id=1729967
4.2 Alter passive/active on failure and retry
@ -235,7 +210,13 @@
from ncftp. This is not a subject without debate, and is probably not really
suitable for libcurl. http://curl.haxx.se/mail/archive-2003-04/0126.html
4.6 ASCII support
4.6 PORT port range
Make CURLOPT_FTPPORT support an additional port number on the IP/if/name,
like "blabla:[port]" or possibly even "blabla:[portfirst]-[portsecond]".
http://curl.haxx.se/bug/feature.cgi?id=1505166
4.7 ASCII support
FTP ASCII transfers do not follow RFC959. They don't convert the data
accordingly.
@ -253,19 +234,6 @@
We should consider how (lib)curl can/should support this.
http://curl.haxx.se/bug/feature.cgi?id=1871388
5.3 Rearrange request header order
Server implementors often make an effort to detect browser and to reject
clients it can detect to not match. One of the last details we cannot yet
control in libcurl's HTTP requests, which also can be exploited to detect
that libcurl is in fact used even when it tries to impersonate a browser, is
the order of the request headers. I propose that we introduce a new option in
which you give headers a value, and then when the HTTP request is built it
sorts the headers based on that number. We could then have internally created
headers use a default value so only headers that need to be moved have to be
specified.
6. TELNET
6.1 ditch stdin
@ -337,86 +305,83 @@ to provide the data to send.
Make curl's SSL layer capable of using other free SSL libraries. Such as
MatrixSSL (http://www.matrixssl.org/).
7.8 Support SRP on the TLS layer
Peter Sylvester's patch for SRP on the TLS layer. Awaits OpenSSL support for
this, no need to support this in libcurl before there's an OpenSSL release
that does it.
7.9 improve configure --with-ssl
make the configure --with-ssl option first check for OpenSSL, then GnuTLS,
then NSS...
7.10 Support DANE
DNS-Based Authentication of Named Entities (DANE) is a way to provide SSL
keys and certs over DNS using DNSSEC as an alternative to the CA model.
http://www.rfc-editor.org/rfc/rfc6698.txt
8. GnuTLS
8.1 SSL engine stuff
8.1 Make NTLM work without OpenSSL functions
Get NTLM working using the functions provided by libgcrypt, since GnuTLS
already depends on that to function. Not strictly SSL/TLS related, but
hey... Another option is to get available DES and MD4 source code from the
cryptopp library. They are fine license-wise, but are C++.
8.2 SSL engine stuff
Is this even possible?
8.3 check connection
8.3 SRP
Work out a common method with Peter Sylvester's OpenSSL-patch for SRP on the
TLS to provide name and password. GnuTLS already supports it...
8.4 non-blocking
Fix the connection phase to be non-blocking when multi interface is used
8.5 check connection
Add a way to check if the connection seems to be alive, to correspond to the
SSL_peak() way we use with OpenSSL.
9. Other protocols
9. SMTP
9.1 ditch ldap-specific select
9.1 Specify the preferred authentication mechanism
* Look over the implementation. The looping will have to "go away" from the
lib/ldap.c source file and get moved to the main network code so that the
multi interface and friends will work for LDAP as well.
Add the ability to specify the preferred authentication mechanism or a list
of mechanisms that should be used. Not only that, but the order that is
returned by the server during the EHLO response should be honored by curl.
9.2 stop TFTP blocking
9.2 Initial response
Stop TFTP from being blocking and doing its own read loop in tftp_do.
Add the ability for the user to specify whether the initial response is
included in the AUTH command. Some email servers, such as Microsoft
Exchange, can work with either whilst others need to have the initial
response sent separately:
10. New protocols
http://curl.haxx.se/mail/lib-2012-03/0114.html
10.1 RTSP
9.3 Pipelining
RFC2326 (protocol - very HTTP-like, also contains URL description)
Add support for pipelining emails.
10.2 RSYNC
10. POP3
There's no RFC for protocol nor URI/URL format. An implementation should
most probably use an existing rsync library, such as librsync.
10.1 auth= in URLs
10.3 RTMP
Being able to specify the preferred authentication mechanism in the URL as
per RFC2384.
There exists a patch that claims to introduce this protocol:
http://osdir.com/ml/gnu.gnash.devel2/2006-11/msg00278.html, further details
in the feature-request: http://curl.haxx.se/bug/feature.cgi?id=1843469
11. LDAP
11. Client
11.1 SASL based authentication mechanisms
11.1 Content-Disposition
Currently the LDAP module only supports ldap_simple_bind_s() in order to bind
to an LDAP server. However, this function sends username and password details
using the simple authentication mechanism (as clear text). However, it should
be possible to use ldap_bind_s() instead specifing the security context
information ourselves.
Add option that is similar to -O but that takes the output file name from the
Content-Disposition: header, and/or uses the local file name used in
redirections for the cases the server bounces the request further to a
different file (name): http://curl.haxx.se/bug/feature.cgi?id=1364676
12. Other protocols
13. New protocols
13.1 RSYNC
There's no RFC for the protocol or an URI/URL format. An implementation
should most probably use an existing rsync library, such as librsync.
14. SASL
14.1 Other authentication mechanisms
Add support for gssapi to SMTP, POP3 and IMAP.
15. Client
15.1 sync
11.2 sync
"curl --sync http://example.com/feed[1-100].rss" or
"curl --sync http://example.net/{index,calendar,history}.html"
@ -425,12 +390,12 @@ to provide the data to send.
remote file is newer than the local file. A Last-Modified HTTP date header
should also be used to set the mod date on the downloaded file.
15.2 glob posts
11.3 glob posts
Globbing support for -d and -F, as in 'curl -d "name=foo[0-9]" URL'.
This is easily scripted though.
15.3 prevent file overwriting
11.4 prevent file overwriting
Add an option that prevents cURL from overwriting existing local files. When
used, and there already is an existing file with the target file name
@ -438,14 +403,18 @@ to provide the data to send.
existing). So that index.html becomes first index.html.1 and then
index.html.2 etc.
15.4 simultaneous parallel transfers
11.5 ftp wildcard download
"curl ftp://site.com/*.txt"
11.6 simultaneous parallel transfers
The client could be told to use maximum N simultaneous parallel transfers and
then just make sure that happens. It should of course not make more than one
connection to the same remote host. This would require the client to use the
multi interface. http://curl.haxx.se/bug/feature.cgi?id=1558595
15.5 provide formpost headers
11.7 provide formpost headers
Extending the capabilities of the multipart formposting. How about leaving
the ';type=foo' syntax as it is and adding an extra tag (headers) which
@ -459,7 +428,7 @@ to provide the data to send.
which should overwrite the program reasonable defaults (plain/text,
8bit...)
15.6 url-specific options
11.8 url-specific options
Provide a way to make options bound to a specific URL among several on the
command line. Possibly by letting ':' separate options between URLs,
@ -473,57 +442,55 @@ to provide the data to send.
The example would do a POST-GET-POST combination on a single command line.
15.7 warning when setting an option
11.9 metalink support
Add metalink support to curl (http://www.metalinker.org/). This is most useful
with simultaneous parallel transfers (11.6) but not necessary.
11.10 warning when setting an option
Display a warning when libcurl returns an error when setting an option.
This can be useful to tell when support for a particular feature hasn't been
compiled into the library.
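Not spelled out in the TODO: the kind of check such a warning would be based on already exists in the API, roughly:

  CURLcode rc = curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 1L);
  if(rc != CURLE_OK)
    fprintf(stderr, "warning: setting option failed: %s\n",
            curl_easy_strerror(rc));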
15.8 IPv6 addresses with globbing
12. Build
Currently the command line client needs to get url globbing disabled (with
-g) for it to support IPv6 numerical addresses. This is a rather silly flaw
that should be corrected. It probably involves a smarter detection of the
'[' and ']' letters.
16. Build
16.1 roffit
12.1 roffit
Consider extending 'roffit' to produce decent ASCII output, and use that
instead of (g)nroff when building src/tool_hugehelp.c
instead of (g)nroff when building src/hugehelp.c
17. Test suite
13. Test suite
17.1 SSL tunnel
13.1 SSL tunnel
Make our own version of stunnel for simple port forwarding to enable HTTPS
and FTP-SSL tests without the stunnel dependency, and it could allow us to
provide test tools built with either OpenSSL or GnuTLS
17.2 nicer lacking perl message
13.2 nicer lacking perl message
If perl wasn't found by the configure script, don't attempt to run the tests
but explain something nice why it doesn't.
17.3 more protocols supported
13.3 more protocols supported
Extend the test suite to include more protocols. The telnet could just do ftp
or http operations (for which we have test servers).
17.4 more platforms supported
13.4 more platforms supported
Make the test suite work on more platforms. OpenBSD and Mac OS. Remove
fork()s and it should become even more portable.
18. Next SONAME bump
14. Next SONAME bump
18.1 http-style HEAD output for ftp
14.1 http-style HEAD output for ftp
#undef CURL_FTP_HTTPSTYLE_HEAD in lib/ftp.c to remove the HTTP-style headers
from being output in NOBODY requests over ftp
18.2 combine error codes
14.2 combine error codes
Combine some of the error codes to remove duplicates. The original
numbering should not be changed, and the old identifiers would be
@ -533,44 +500,37 @@ to provide the data to send.
Candidates for removal and their replacements:
CURLE_FILE_COULDNT_READ_FILE => CURLE_REMOTE_FILE_NOT_FOUND
CURLE_FTP_COULDNT_RETR_FILE => CURLE_REMOTE_FILE_NOT_FOUND
CURLE_FTP_COULDNT_USE_REST => CURLE_RANGE_ERROR
CURLE_FUNCTION_NOT_FOUND => CURLE_FAILED_INIT
CURLE_LDAP_INVALID_URL => CURLE_URL_MALFORMAT
CURLE_TFTP_NOSUCHUSER => CURLE_TFTP_ILLEGAL
CURLE_TFTP_NOTFOUND => CURLE_REMOTE_FILE_NOT_FOUND
CURLE_TFTP_PERM => CURLE_REMOTE_ACCESS_DENIED
18.3 extend CURLOPT_SOCKOPTFUNCTION prototype
14.3 extend CURLOPT_SOCKOPTFUNCTION prototype
The current prototype only provides 'purpose' that tells what the
connection/socket is for, but not any protocol or similar. It makes it hard
for applications to differentiate on TCP vs UDP and even HTTP vs FTP and
similar.
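For context (the prototype is not quoted in the TODO), the callback being discussed currently looks about like this:

  #include <curl/curl.h>

  /* called right after the socket is created, before it is connected */
  static int sockopt_cb(void *clientp, curl_socket_t curlfd,
                        curlsocktype purpose)
  {
    (void)clientp;
    (void)curlfd;
    /* 'purpose' (e.g. CURLSOCKTYPE_IPCXN) is the only context passed in;
       nothing tells the callback TCP vs UDP or HTTP vs FTP */
    return 0;
  }

  /* installed with:
     curl_easy_setopt(curl, CURLOPT_SOCKOPTFUNCTION, sockopt_cb);
     curl_easy_setopt(curl, CURLOPT_SOCKOPTDATA, NULL); */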
10. Next major release
15. Next major release
19.1 cleanup return codes
15.1 cleanup return codes
curl_easy_cleanup() returns void, but curl_multi_cleanup() returns a
CURLMcode. These should be changed to be the same.
19.2 remove obsolete defines
15.2 remove obsolete defines
remove obsolete defines from curl/curl.h
19.3 size_t
15.3 size_t
make several functions use size_t instead of int in their APIs
19.4 remove several functions
15.4 remove several functions
remove the following functions from the public API:
@ -583,59 +543,15 @@ to provide the data to send.
curl_strnequal
They will instead become curlx_ - alternatives. That makes the curl app
still capable of using them, by building with them from source.
still capable of building with them from source.
These functions have no purpose anymore:
curl_multi_socket
curl_multi_socket_all
19.5 remove CURLOPT_FAILONERROR
15.5 remove CURLOPT_FAILONERROR
Remove support for CURLOPT_FAILONERROR, it has gotten too kludgy and weird
internally. Let the app judge success or not for itself.
19.6 remove CURLOPT_DNS_USE_GLOBAL_CACHE
15.6 remove CURLOPT_DNS_USE_GLOBAL_CACHE
Remove support for a global DNS cache. Anything global is silly, and we
already offer the share interface for the same functionality but done
"right".
19.7 remove progress meter from libcurl
The internally provided progress meter output doesn't belong in the library.
Basically no application wants it (apart from curl) but instead applications
can and should do their own progress meters using the progress callback.
The progress callback should then be bumped as well to get proper 64bit
variable types passed to it instead of doubles so that big files work
correctly.
19.8 remove 'curl_httppost' from public
curl_formadd() was made to fill in a public struct, but the fact that the
struct is public is never really used by application for their own advantage
but instead often restricts how the form functions can or can't be modified.
Changing them to return a private handle will benefit the implementation and
allow us much greater freedoms while still maintining a solid API and ABI.
19.9 have form functions use CURL handle argument
curl_formadd() and curl_formget() both currently have no CURL handle
argument, but both can use a callback that is set in the easy handle, and
thus curl_formget() with callback cannot function without first having
curl_easy_perform() (or similar) called - which is hard to grasp and a design
mistake.
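A small usage sketch (not from the TODO) showing the handle-less calls in question:

  #include <stdio.h>
  #include <curl/curl.h>

  static size_t form_dump(void *arg, const char *buf, size_t len)
  {
    (void)arg;
    return fwrite(buf, 1, len, stdout);   /* receives the serialized form */
  }

  int main(void)
  {
    struct curl_httppost *post = NULL, *last = NULL;

    /* note that no CURL handle appears in either call */
    curl_formadd(&post, &last,
                 CURLFORM_COPYNAME, "name",
                 CURLFORM_COPYCONTENTS, "daniel",
                 CURLFORM_END);
    curl_formget(post, NULL, form_dump);
    curl_formfree(post);
    return 0;
  }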
19.10 Add CURLOPT_MAIL_CLIENT option
Rather than use the URL to specify the mail client string to present in the
HELO and EHLO commands, libcurl should support a new CURLOPT specifically for
specifing this data as the URL is non-standard and to be honest a bit of a
hack ;-)
Please see the following thread for more information:
http://curl.haxx.se/mail/lib-2012-05/0178.html


@ -1,5 +1,5 @@
Online: http://curl.haxx.se/docs/httpscripting.html
Date: Jan 19, 2011
Date: May 28, 2008
The Art Of Scripting HTTP Requests Using Curl
=============================================
@ -38,26 +38,10 @@ Date: Jan 19, 2011
request a particular action, and then the server replies a few text lines
before the actual requested content is sent to the client.
The client, curl, sends a HTTP request. The request contains a method (like
GET, POST, HEAD etc), a number of request headers and sometimes a request
body. The HTTP server responds with a status line (indicating if things went
well), response headers and most often also a response body. The "body" part
is the plain data you requested, like the actual HTML or the image etc.
1.1 See the Protocol
Using curl's option --verbose (-v as a short option) will display what kind
of commands curl sends to the server, as well as a few other informational
texts.
--verbose is the single most useful option when it comes to debug or even
understand the curl<->server interaction.
Sometimes even --verbose is not enough. Then --trace and --trace-ascii offer
even more details as they show EVERYTHING curl sends and receives. Use it
like this:
curl --trace-ascii debugdump.txt http://www.example.com/
Using curl's option -v will display what kind of commands curl sends to the
server, as well as a few other informational texts. -v is the single most
useful option when it comes to debug or even understand the curl<->server
interaction.
2. URL
@ -77,10 +61,10 @@ Date: Jan 19, 2011
you get a web page returned in your terminal window. The entire HTML document
that that URL holds.
All HTTP replies contain a set of response headers that are normally hidden,
use curl's --include (-i) option to display them as well as the rest of the
document. You can also ask the remote server for ONLY the headers by using
the --head (-I) option (which will make curl issue a HEAD request).
All HTTP replies contain a set of headers that are normally hidden, use
curl's -i option to display them as well as the rest of the document. You can
also ask the remote server for ONLY the headers by using the -I option (which
will make curl issue a HEAD request).
4. Forms
@ -119,7 +103,7 @@ Date: Jan 19, 2011
To make curl do the GET form post for you, just enter the expected created
URL:
curl "http://www.hotmail.com/when/junk.cgi?birthyear=1905&press=OK"
curl "www.hotmail.com/when/junk.cgi?birthyear=1905&press=OK"
4.2 POST
@ -143,8 +127,7 @@ Date: Jan 19, 2011
And to use curl to post this form with the same data filled in as before, we
could do it like:
curl --data "birthyear=1905&press=%20OK%20" \
http://www.example.com/when.cgi
curl -d "birthyear=1905&press=%20OK%20" www.hotmail.com/when/junk.cgi
This kind of POST will use the Content-Type
application/x-www-form-urlencoded and is the most widely used POST kind.
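As an aside not present in the tutorial, the rough libcurl (C) equivalent of such a form post is:

  #include <curl/curl.h>

  int main(void)
  {
    CURL *curl = curl_easy_init();
    if(curl) {
      curl_easy_setopt(curl, CURLOPT_URL, "http://www.example.com/when.cgi");
      /* sent as Content-Type: application/x-www-form-urlencoded */
      curl_easy_setopt(curl, CURLOPT_POSTFIELDS,
                       "birthyear=1905&press=%20OK%20");
      curl_easy_perform(curl);
      curl_easy_cleanup(curl);
    }
    return 0;
  }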
@ -156,7 +139,7 @@ Date: Jan 19, 2011
Recent curl versions can in fact url-encode POST data for you, like this:
curl --data-urlencode "name=I am Daniel" http://www.example.com
curl --data-urlencode "name=I am Daniel" www.example.com
4.3 File Upload POST
@ -177,7 +160,7 @@ Date: Jan 19, 2011
To post to a form like this with curl, you enter a command line like:
curl --form upload=@localfilename --form press=OK [URL]
curl -F upload=@localfilename -F press=OK [URL]
4.4 Hidden Fields
@ -198,7 +181,7 @@ Date: Jan 19, 2011
To post this with curl, you won't have to think about if the fields are
hidden or not. To curl they're all the same:
curl --data "birthyear=1905&press=OK&person=daniel" [URL]
curl -d "birthyear=1905&press=OK&person=daniel" [URL]
4.5 Figure Out What A POST Looks Like
@ -221,7 +204,7 @@ Date: Jan 19, 2011
Put a file to a HTTP server with curl:
curl --upload-file uploadfile http://www.example.com/receive.cgi
curl -T uploadfile www.uploadhttp.com/receive.cgi
6. HTTP Authentication
@ -234,18 +217,18 @@ Date: Jan 19, 2011
To tell curl to use a user and password for authentication:
curl --user name:password http://www.example.com
curl -u name:password www.secrets.com
The site might require a different authentication method (check the headers
returned by the server), and then --ntlm, --digest, --negotiate or even
--anyauth might be options that suit you.
Sometimes your HTTP access is only available through the use of a HTTP
proxy. This seems to be especially common at various companies. A HTTP proxy
may require its own user and password to allow the client to get through to
the Internet. To specify those with curl, run something like:
curl --proxy-user proxyuser:proxypassword curl.haxx.se
curl -U proxyuser:proxypassword curl.haxx.se
If your proxy requires the authentication to be done using the NTLM method,
use --proxy-ntlm, if it requires Digest use --proxy-digest.
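As an aside not present in the tutorial, a rough libcurl (C) sketch of both kinds of credentials:

  #include <curl/curl.h>

  int main(void)
  {
    CURL *curl = curl_easy_init();
    if(curl) {
      curl_easy_setopt(curl, CURLOPT_URL, "http://www.example.com/");
      curl_easy_setopt(curl, CURLOPT_USERPWD, "name:password");
      /* let libcurl negotiate Basic, Digest or NTLM as the server demands */
      curl_easy_setopt(curl, CURLOPT_HTTPAUTH, CURLAUTH_ANY);
      /* credentials for an authenticating HTTP proxy, if one is configured */
      curl_easy_setopt(curl, CURLOPT_PROXYUSERPWD, "proxyuser:proxypassword");
      curl_easy_perform(curl);
      curl_easy_cleanup(curl);
    }
    return 0;
  }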
@ -274,7 +257,7 @@ Date: Jan 19, 2011
Use curl to set the referer field with:
curl --referer http://www.example.come http://www.example.com
curl -e http://curl.haxx.se daniel.haxx.se
8. User Agent
@ -290,13 +273,13 @@ Date: Jan 19, 2011
is time to set the User Agent field to fool the server into thinking you're
one of those browsers.
To make curl look like Internet Explorer 5 on a Windows 2000 box:
To make curl look like Internet Explorer on a Windows 2000 box:
curl --user-agent "Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)" [URL]
curl -A "Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0)" [URL]
Or why not look like you're using Netscape 4.73 on an old Linux box:
Or why not look like you're using Netscape 4.73 on a Linux (PIII) box:
curl --user-agent "Mozilla/4.73 [en] (X11; U; Linux 2.2.15 i686)" [URL]
curl -A "Mozilla/4.73 [en] (X11; U; Linux 2.2.15 i686)" [URL]
9. Redirects
@ -309,14 +292,13 @@ Date: Jan 19, 2011
such pages in the same manner it display all HTTP replies. It does however
feature an option that will make it attempt to follow the Location: pointers.
To tell curl to follow a Location:
curl --location http://www.example.com
To tell curl to follow a Location:
curl -L www.sitethatredirects.com
If you use curl to POST to a site that immediately redirects you to another
page, you can safely use --location (-L) and --data/--form together. Curl will
only use POST in the first request, and then revert to GET in the following
operations.
page, you can safely use -L and -d/-F together. Curl will only use POST in
the first request, and then revert to GET in the following operations.
10. Cookies
@ -338,16 +320,16 @@ Date: Jan 19, 2011
The simplest way to send a few cookies to the server when getting a page with
curl is to add them on the command line like:
curl --cookie "name=Daniel" http://www.example.com
curl -b "name=Daniel" www.cookiesite.com
Cookies are sent as common HTTP headers. This is practical as it allows curl
to record cookies simply by recording headers. Record cookies with curl by
using the --dump-header (-D) option like:
using the -D option like:
curl --dump-header headers_and_cookies http://www.example.com
curl -D headers_and_cookies www.cookiesite.com
(Take note that the --cookie-jar option described below is a better way to
store cookies.)
(Take note that the -c option described below is a better way to store
cookies.)
Curl has a full blown cookie parsing engine built-in that comes to use if you
want to reconnect to a server and use cookies that were stored from a
@ -355,25 +337,24 @@ Date: Jan 19, 2011
believing you had a previous connection). To use previously stored cookies,
you run curl like:
curl --cookie stored_cookies_in_file http://www.example.com
curl -b stored_cookies_in_file www.cookiesite.com
Curl's "cookie engine" gets enabled when you use the --cookie option. If you
only want curl to understand received cookies, use --cookie with a file that
doesn't exist. Example, if you want to let curl understand cookies from a
page and follow a location (and thus possibly send back cookies it received),
you can invoke it like:
Curl's "cookie engine" gets enabled when you use the -b option. If you only
want curl to understand received cookies, use -b with a file that doesn't
exist. Example, if you want to let curl understand cookies from a page and
follow a location (and thus possibly send back cookies it received), you can
invoke it like:
curl --cookie nada --location http://www.example.com
curl -b nada -L www.cookiesite.com
Curl has the ability to read and write cookie files that use the same file
format that Netscape and Mozilla do. It is a convenient way to share cookies
between browsers and automatic scripts. The --cookie (-b) switch
automatically detects if a given file is such a cookie file and parses it,
and by using the --cookie-jar (-c) option you'll make curl write a new cookie
file at the end of an operation:
between browsers and automatic scripts. The -b switch automatically detects
if a given file is such a cookie file and parses it, and by using the
-c/--cookie-jar option you'll make curl write a new cookie file at the end of
an operation:
curl --cookie cookies.txt --cookie-jar newcookies.txt \
http://www.example.com
curl -b cookies.txt -c newcookies.txt www.cookiesite.com
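As an aside not present in the tutorial, the same cookie reading and writing in libcurl (C) is roughly:

  #include <curl/curl.h>

  int main(void)
  {
    CURL *curl = curl_easy_init();
    if(curl) {
      curl_easy_setopt(curl, CURLOPT_URL, "http://www.example.com/");
      /* enable the cookie engine and load previously stored cookies */
      curl_easy_setopt(curl, CURLOPT_COOKIEFILE, "cookies.txt");
      /* write all known cookies to this file when the handle is cleaned up */
      curl_easy_setopt(curl, CURLOPT_COOKIEJAR, "newcookies.txt");
      curl_easy_perform(curl);
      curl_easy_cleanup(curl);
    }
    return 0;
  }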
11. HTTPS
@ -389,7 +370,7 @@ Date: Jan 19, 2011
Curl supports encrypted fetches thanks to the freely available OpenSSL
libraries. To get a page from a HTTPS server, simply run curl like:
curl https://secure.example.com
curl https://that.secure.server.com
11.1 Certificates
@ -400,13 +381,13 @@ Date: Jan 19, 2011
can be specified on the command line or if not, entered interactively when
curl queries for it. Use a certificate with curl on a HTTPS server like:
curl --cert mycert.pem https://secure.example.com
curl -E mycert.pem https://that.secure.server.com
curl also tries to verify that the server is who it claims to be, by
verifying the server's certificate against a locally stored CA cert
bundle. Failing the verification will cause curl to deny the connection. You
must then use --insecure (-k) in case you want to tell curl to ignore that
the server can't be verified.
must then use -k in case you want to tell curl to ignore that the server
can't be verified.
More about server certificate verification and ca cert bundles can be read
in the SSLCERTS document, available online here:
@ -421,18 +402,17 @@ Date: Jan 19, 2011
For example, you can change the POST request to a PROPFIND and send the data
as "Content-Type: text/xml" (instead of the default Content-Type) like this:
curl --data "<xml>" --header "Content-Type: text/xml" \
--request PROPFIND url.com
curl -d "<xml>" -H "Content-Type: text/xml" -X PROPFIND url.com
You can delete a default header by providing one without content. Like you
can ruin the request by chopping off the Host: header:
curl --header "Host:" http://www.example.com
curl -H "Host:" http://mysite.com
You can add headers the same way. Your server may want a "Destination:"
header, and you can add it:
curl --header "Destination: http://nowhere" http://example.com
curl -H "Destination: http://moo.com/nowhere" http://url.com
13. Web Login
@ -463,6 +443,7 @@ Date: Jan 19, 2011
to do a proper login POST. Remember that the contents need to be URL encoded
when sent in a normal POST.
14. Debug
Many times when you run curl on a site, you'll notice that the site doesn't
@ -475,8 +456,8 @@ Date: Jan 19, 2011
* Use the --trace-ascii option to store fully detailed logs of the requests
for easier analyzing and better understanding
* Make sure you check for and use cookies when needed (both reading with
--cookie and writing with --cookie-jar)
* Make sure you check for and use cookies when needed (both reading with -b
and writing with -c)
* Set user-agent to one like a recent popular browser does
@ -498,10 +479,12 @@ Date: Jan 19, 2011
RFC 2616 is a must to read if you want in-depth understanding of the HTTP
protocol.
RFC 3986 explains the URL syntax.
RFC 2396 explains the URL syntax.
RFC 2109 defines how cookies are supposed to work.
RFC 1867 defines the HTTP post upload format.
http://www.openssl.org is the home of the OpenSSL project
http://curl.haxx.se is the home of the cURL project


@ -1,7 +1,7 @@
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
| (__| |_| | _ <| |___
_ _ ____ _
___| | | | _ \| |
/ __| | | | |_) | |
| (__| |_| | _ <| |___
\___|\___/|_| \_\_____|
Version Numbers and Releases
@ -11,25 +11,32 @@ Version Numbers and Releases
The version numbering is always built up using the same system:
X.Y[.Z]
X.Y[.Z][-preN]
Where
X is main version number
Y is release number
Z is patch number
N is pre-release number
One of these numbers will get bumped in each new release. The numbers to the
right of a bumped number will be reset to zero. If Z is zero, it may not be
included in the version number.
included in the version number. The pre release number is only included in
pre releases (they're never used in public, official, releases).
The main version number will get bumped when *really* big, world colliding
changes are made. The release number is bumped when changes are performed or
things/features are added. The patch number is bumped when the changes are
mere bugfixes.
changes are made. The release number is bumped when big changes are
performed. The patch number is bumped when the changes are mere bugfixes and
only minor feature changes. The pre-release is a counter, to identify which
pre-release a certain release is.
When reaching the end of a pre-release period, the version without the
pre-release part will be released as a public release.
It means that after release 1.2.3, we can release 2.0 if something really big
has been made, 1.3 if not that big changes were made or 1.2.4 if mostly bugs
were fixed.
were fixed. Before 1.2.4 is released, we might release a 1.2.4-pre1 release
for the brave people to try before the actual release.
Bumping, as in increasing the number with 1, is unconditionally only
affecting one of the numbers (except the ones to the right of it, that may be
@ -45,16 +52,16 @@ Version Numbers and Releases
have the libcurl version stored in the curl/curlver.h file using a static
numbering scheme that can be used for comparison. The version number is
defined as:
#define LIBCURL_VERSION_NUM 0xXXYYZZ
Where XX, YY and ZZ are the main version, release and patch numbers in
hexadecimal. All three number fields are always represented using two digits
(eight bits each). 1.2 would appear as "0x010200" while version 9.11.7
appears as "0x090b07".
hexadecimal. All three numbers are always represented using two digits. 1.2
would appear as "0x010200" while version 9.11.7 appears as "0x090b07".
This 6-digit hexadecimal number is always a greater number in a more recent
release. It makes comparisons with greater than and less than work.
This 6-digit hexadecimal number does not show pre-release number, and it is
always a greater number in a more recent release. It makes comparisons with
greater than and less than work.
This number is also available as three separate defines:
LIBCURL_VERSION_MAJOR, LIBCURL_VERSION_MINOR and LIBCURL_VERSION_PATCH.
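One practical consequence, not spelled out in the document: because the number only ever grows, applications can do compile-time checks with a plain comparison (the threshold below is just an example):

  #include <curl/curl.h>

  #if LIBCURL_VERSION_NUM >= 0x071301   /* 7.19.1 or anything newer */
  #  define LIBCURL_IS_NEW_ENOUGH 1
  #else
  #  define LIBCURL_IS_NEW_ENOUGH 0
  #endif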


@ -5,7 +5,7 @@
.\" * | (__| |_| | _ <| |___
.\" * \___|\___/|_| \_\_____|
.\" *
.\" * Copyright (C) 1998 - 2012, Daniel Stenberg, <daniel@haxx.se>, et al.
.\" * Copyright (C) 1998 - 2007, Daniel Stenberg, <daniel@haxx.se>, et al.
.\" *
.\" * This software is licensed as described in the file COPYING, which
.\" * you should have received as part of this distribution. The terms
@ -18,6 +18,7 @@
.\" * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
.\" * KIND, either express or implied.
.\" *
.\" * $Id: curl-config.1,v 1.16 2007-10-26 07:46:02 bagder Exp $
.\" **************************************************************************
.\"
.TH curl-config 1 "25 Oct 2007" "Curl 7.17.1" "curl-config manual"
@ -27,7 +28,7 @@ curl-config \- Get information about a libcurl installation
.B curl-config [options]
.SH DESCRIPTION
.B curl-config
displays information about the curl and libcurl installation.
displays information about a previous curl and libcurl installation.
.SH OPTIONS
.IP "--ca"
Displays the built-in path to the CA cert bundle this libcurl uses.
@ -35,19 +36,17 @@ Displays the built-in path to the CA cert bundle this libcurl uses.
Displays the compiler used to build libcurl.
.IP "--cflags"
Set of compiler options (CFLAGS) to use when compiling files that use
libcurl. Currently that is only the include path to the curl include files.
libcurl. Currently that is only thw include path to the curl include files.
.IP "--checkfor [version]"
Specify the oldest possible libcurl version string you want, and this
script will return 0 if the current installation is new enough or it
returns 1 and outputs a text saying that the current version is not new
enough. (Added in 7.15.4)
.IP "--configure"
Displays the arguments given to configure when building curl.
.IP "--feature"
Lists what particular main features the installed libcurl was built with. At
the time of writing, this list may include SSL, KRB4 or IPv6. Do not assume
any particular order. The keywords will be separated by newlines. There may be
none, one, or several keywords in the list.
.IP "--help"
Displays the available options.
.IP "--libs"
@ -62,7 +61,7 @@ Lists what particular protocols the installed libcurl was built to support. At
the time of writing, this list may include HTTP, HTTPS, FTP, FTPS, FILE,
TELNET, LDAP, DICT. Do not assume any particular order. The protocols will
be listed using uppercase and are separated by newlines. There may be none,
one, or several protocols in the list. (Added in 7.13.0)
.IP "--static-libs"
Shows the complete set of libs and other linker options you will need in order
to link your application with libcurl statically. (Added in 7.17.1)
@ -93,6 +92,7 @@ What's the installed libcurl version?
How do I build a single file with a one-line command?
$ `curl-config --cc --cflags` -o example example.c `curl-config --libs`
$ `curl-config --cc --cflags --libs` -o example example.c
.SH "SEE ALSO"
.BR curl (1)


@ -1,8 +1,6 @@
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html><head>
<title>curl-config man page</title>
<meta name="generator" content="roffit">
<meta name="generator" content="roffit 0.7">
<STYLE type="text/css">
P.level0 {
padding-left: 2em;
@ -48,20 +46,18 @@ p.roffit {
<p class="level0"><a name="NAME"></a><h2 class="nroffsh">NAME</h2>
<p class="level0">curl-config - Get information about a libcurl installation <a name="SYNOPSIS"></a><h2 class="nroffsh">SYNOPSIS</h2>
<p class="level0"><span Class="bold">curl-config [options]</span> <a name="DESCRIPTION"></a><h2 class="nroffsh">DESCRIPTION</h2>
<p class="level0"><span Class="bold">curl-config</span> displays information about the curl and libcurl installation. <a name="OPTIONS"></a><h2 class="nroffsh">OPTIONS</h2>
<p class="level0"><span Class="bold">curl-config</span> displays information about a previous curl and libcurl installation. <a name="OPTIONS"></a><h2 class="nroffsh">OPTIONS</h2>
<p class="level0">
<p class="level0"><a name="--ca"></a><span class="nroffip">--ca</span>
<p class="level1">Displays the built-in path to the CA cert bundle this libcurl uses.
<p class="level0"><a name="--cc"></a><span class="nroffip">--cc</span>
<p class="level1">Displays the compiler used to build libcurl.
<p class="level0"><a name="--cflags"></a><span class="nroffip">--cflags</span>
<p class="level1">Set of compiler options (CFLAGS) to use when compiling files that use libcurl. Currently that is only the include path to the curl include files.
<p class="level1">Set of compiler options (CFLAGS) to use when compiling files that use libcurl. Currently that is only thw include path to the curl include files.
<p class="level0"><a name="--checkfor"></a><span class="nroffip">--checkfor [version]</span>
<p class="level1">Specify the oldest possible libcurl version string you want, and this script will return 0 if the current installation is new enough or it returns 1 and outputs a text saying that the current version is not new enough. (Added in 7.15.4)
<p class="level0"><a name="--configure"></a><span class="nroffip">--configure</span>
<p class="level1">Displays the arguments given to configure when building curl.
<p class="level1">Specify the oldest possible libcurl version string you want, and this script will return 0 if the current installation is new enough or it returns 1 and outputs a text saying that the current version is not new enough. (Added in 7.15.4)
<p class="level0"><a name="--feature"></a><span class="nroffip">--feature</span>
<p class="level1">Lists what particular main features the installed libcurl was built with. At the time of writing, this list may include SSL, KRB4 or IPv6. Do not assume any particular order. The keywords will be separated by newlines. There may be none, one, or several keywords in the list.
<p class="level1">Lists what particular main features the installed libcurl was built with. At the time of writing, this list may include SSL, KRB4 or IPv6. Do not assume any particular order. The keywords will be separated by newlines. There may be none, one or several keywords in the list.
<p class="level0"><a name="--help"></a><span class="nroffip">--help</span>
<p class="level1">Displays the available options.
<p class="level0"><a name="--libs"></a><span class="nroffip">--libs</span>
@ -69,7 +65,7 @@ p.roffit {
<p class="level0"><a name="--prefix"></a><span class="nroffip">--prefix</span>
<p class="level1">This is the prefix used when libcurl was installed. Libcurl is then installed in $prefix/lib and its header files are installed in $prefix/include and so on. The prefix is set with "configure --prefix".
<p class="level0"><a name="--protocols"></a><span class="nroffip">--protocols</span>
<p class="level1">Lists what particular protocols the installed libcurl was built to support. At the time of writing, this list may include HTTP, HTTPS, FTP, FTPS, FILE, TELNET, LDAP, DICT. Do not assume any particular order. The protocols will be listed using uppercase and are separated by newlines. There may be none, one, or several protocols in the list. (Added in 7.13.0)
<p class="level1">Lists what particular protocols the installed libcurl was built to support. At the time of writing, this list may include HTTP, HTTPS, FTP, FTPS, FILE, TELNET, LDAP, DICT. Do not assume any particular order. The protocols will be listed using uppercase and are separated by newlines. There may be none, one or several protocols in the list. (Added in 7.13.0)
<p class="level0"><a name="--static-libs"></a><span class="nroffip">--static-libs</span>
<p class="level1">Shows the complete set of libs and other linker options you will need in order to link your application with libcurl statically. (Added in 7.17.1)
<p class="level0"><a name="--version"></a><span class="nroffip">--version</span>
@ -85,7 +81,8 @@ p.roffit {
<p class="level0">What's the installed libcurl version?
<p class="level0">&nbsp; $ curl-config --version
<p class="level0">How do I build a single file with a one-line command?
<p class="level0">&nbsp; $ `curl-config --cc --cflags` -o example example.c `curl-config --libs` <a name="SEE"></a><h2 class="nroffsh">SEE ALSO</h2>
<p class="level0">&nbsp; $ `curl-config --cc --cflags --libs` -o example example.c
<p class="level0"><a name="SEE"></a><h2 class="nroffsh">SEE ALSO</h2>
<p class="level0"><span Class="manpage">curl (1)</span> <p class="roffit">
This HTML page was made with <a href="http://daniel.haxx.se/projects/roffit/">roffit</a>.
</body></html>

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,25 +1,13 @@
/***************************************************************************
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2011, Daniel Stenberg, <daniel@haxx.se>, et al.
* $Id: 10-at-a-time.c,v 1.9 2008-09-22 17:27:24 danf Exp $
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
/* Example application source code using the multi interface to download many
* Example application source code using the multi interface to download many
* files, but with a capped maximum amount of simultaneous transfers.
*
* Written by Michael Wallner
@ -132,7 +120,7 @@ int main(void)
}
while (U) {
curl_multi_perform(cm, &U);
while (CURLM_CALL_MULTI_PERFORM == curl_multi_perform(cm, &U));
if (U) {
FD_ZERO(&R);
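
To make the capped-parallelism idea in 10-at-a-time.c easier to follow, here
is a minimal, hedged sketch of the pattern: prime the multi handle with at
most MAX_PARALLEL transfers and add a new one each time a transfer completes.
The URL list is a placeholder, error checking is trimmed, and curl_multi_wait()
assumes libcurl 7.28.0 or newer (the example itself waits on select() instead):

#include <curl/curl.h>

static const char *urls[] = {   /* placeholder URLs */
  "http://example.com/1", "http://example.com/2", "http://example.com/3"
};
#define NUM_URLS (sizeof(urls)/sizeof(urls[0]))
#define MAX_PARALLEL 10

int main(void)
{
  CURLM *cm;
  CURLMsg *msg;
  unsigned int added = 0, completed = 0;
  int still_running = 0, msgs_left;

  curl_global_init(CURL_GLOBAL_ALL);
  cm = curl_multi_init();

  /* prime the multi handle with up to MAX_PARALLEL transfers */
  while(added < NUM_URLS && added < MAX_PARALLEL) {
    CURL *e = curl_easy_init();
    curl_easy_setopt(e, CURLOPT_URL, urls[added]);
    curl_multi_add_handle(cm, e);
    added++;
  }

  while(completed < NUM_URLS) {
    curl_multi_perform(cm, &still_running);

    /* reap finished transfers and top the multi handle back up */
    while((msg = curl_multi_info_read(cm, &msgs_left))) {
      if(msg->msg == CURLMSG_DONE) {
        CURL *e = msg->easy_handle;
        curl_multi_remove_handle(cm, e);
        curl_easy_cleanup(e);
        completed++;
        if(added < NUM_URLS) {
          CURL *n = curl_easy_init();
          curl_easy_setopt(n, CURLOPT_URL, urls[added]);
          curl_multi_add_handle(cm, n);
          added++;
        }
      }
    }
    if(completed < NUM_URLS)
      curl_multi_wait(cm, NULL, 0, 1000, NULL);   /* needs libcurl 7.28.0+ */
  }

  curl_multi_cleanup(cm);
  curl_global_cleanup();
  return 0;
}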


@ -1,64 +1,36 @@
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) 1998 - 2012, Daniel Stenberg, <daniel@haxx.se>, et al.
# $Id: Makefile.am,v 1.52 2008-08-07 00:29:08 yangtse Exp $
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://curl.haxx.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
###########################################################################
AUTOMAKE_OPTIONS = foreign nostdinc
EXTRA_DIST = README Makefile.example Makefile.inc Makefile.m32 \
Makefile.netware makefile.dj $(COMPLICATED_EXAMPLES)
makefile.dj $(COMPLICATED_EXAMPLES)
# Specify our include paths here, and do it relative to $(top_srcdir) and
# $(top_builddir), to ensure that these paths which belong to the library
# being currently built and tested are searched before the library which
# might possibly already be installed in the system.
#
# $(top_builddir)/include/curl for generated curlbuild.h included from curl.h
# $(top_builddir)/include for generated curlbuild.h inc. from lib/curl_setup.h
# $(top_builddir)/include is for libcurl's generated curl/curlbuild.h file
# $(top_srcdir)/include is for libcurl's external include files
AM_CPPFLAGS = -I$(top_builddir)/include/curl \
-I$(top_builddir)/include \
-I$(top_srcdir)/include
INCLUDES = -I$(top_builddir)/include \
-I$(top_srcdir)/include
LIBDIR = $(top_builddir)/lib
# Avoid libcurl obsolete stuff
AM_CPPFLAGS += -DCURL_NO_OLDIES
# Mostly for Windows build targets, when using static libcurl
if USE_CPPFLAG_CURL_STATICLIB
AM_CPPFLAGS += -DCURL_STATICLIB
if STATICLIB
# we need this define when building with a static lib on Windows
STATICCPPFLAGS = -DCURL_STATICLIB
endif
# Prevent LIBS from being used for all link targets
LIBS = $(BLANK_AT_MAKETIME)
CPPFLAGS = -DCURL_NO_OLDIES $(STATICCPPFLAGS)
# Dependencies
if USE_EXPLICIT_LIB_DEPS
LDADD = $(LIBDIR)/libcurl.la @LIBCURL_LIBS@
else
LDADD = $(LIBDIR)/libcurl.la
endif
# Makefile.inc provides the check_PROGRAMS and COMPLICATED_EXAMPLES defines
include Makefile.inc
all: $(check_PROGRAMS)


@ -1,24 +1,12 @@
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
#############################################################################
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) 1998 - 2011, Daniel Stenberg, <daniel@haxx.se>, et al.
# $Id: Makefile.example,v 1.4 2007-08-11 20:57:54 bagder Exp $
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://curl.haxx.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
###########################################################################
# What to call the final executable
TARGET = example

File diff suppressed because it is too large


@ -1,15 +1,16 @@
# These are all libcurl example programs to be test compiled
check_PROGRAMS = 10-at-a-time anyauthput cookie_interface debug fileupload \
fopen ftpget ftpgetresp ftpupload getinfo getinmemory http-post httpput \
https multi-app multi-debugcallback multi-double multi-post multi-single \
persistant post-callback postit2 sepheaders simple simplepost simplessl \
sendrecv httpcustomheader certinfo chkspeed ftpgetinfo ftp-wildcard \
smtp-multi simplesmtp smtp-tls rtsp externalsocket resolve \
progressfunc pop3s pop3slist imap url2file sftpget ftpsget
check_PROGRAMS = 10-at-a-time anyauthput cookie_interface \
debug fileupload fopen ftpget ftpgetresp ftpupload \
getinfo getinmemory http-post httpput \
https multi-app multi-debugcallback multi-double \
multi-post multi-single persistant post-callback \
postit2 sepheaders simple simplepost simplessl \
sendrecv httpcustomheader certinfo
# These examples require external dependencies that may not be commonly
# available on POSIX systems, so don't bother attempting to compile them here.
COMPLICATED_EXAMPLES = curlgtk.c curlx.c htmltitle.cpp cacertinmem.c \
ftpuploadresume.c ghiper.c hiperfifo.c htmltidy.c multithread.c \
opensslthreadlock.c sampleconv.c synctime.c threaded-ssl.c evhiperfifo.c \
smooth-gtk-thread.c version-check.pl href_extractor.c asiohiper.cpp
COMPLICATED_EXAMPLES = \
curlgtk.c curlx.c htmltitle.cc cacertinmem.c ftpuploadresume.c \
ghiper.c hiperfifo.c htmltidy.c multithread.c \
opensslthreadlock.c sampleconv.c synctime.c threaded-ssl.c


@ -1,62 +1,29 @@
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#########################################################################
# $Id: Makefile.m32,v 1.4 2008-05-09 16:31:51 yangtse Exp $
#
# Copyright (C) 1998 - 2011, Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://curl.haxx.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
###########################################################################
#
## Makefile for building curl examples with MingW (GCC-3.2 or later)
## and optionally OpenSSL (0.9.8), libssh2 (1.3), zlib (1.2.5), librtmp (2.3)
## Makefile for building curl examples with MingW32
## and optionally OpenSSL (0.9.8), libssh2 (0.18), zlib (1.2.3)
##
## Usage: mingw32-make -f Makefile.m32 CFG=-feature1[-feature2][-feature3][...]
## Example: mingw32-make -f Makefile.m32 CFG=-zlib-ssl-spi-winidn
## Usage:
## mingw32-make -f Makefile.m32 [SSL=1] [SSH2=1] [ZLIB=1] [SSPI=1] [IPV6=1] [DYN=1]
##
## Hint: you can also set environment vars to control the build, f.e.:
## set ZLIB_PATH=c:/zlib-1.2.7
## set ZLIB_PATH=c:/zlib-1.2.3
## set ZLIB=1
#
###########################################################################
##
#########################################################################
# Edit the path below to point to the base of your Zlib sources.
ifndef ZLIB_PATH
ZLIB_PATH = ../../../zlib-1.2.7
ZLIB_PATH = ../../zlib-1.2.3
endif
# Edit the path below to point to the base of your OpenSSL package.
ifndef OPENSSL_PATH
OPENSSL_PATH = ../../../openssl-0.9.8x
OPENSSL_PATH = ../../openssl-0.9.8g
endif
# Edit the path below to point to the base of your LibSSH2 package.
ifndef LIBSSH2_PATH
LIBSSH2_PATH = ../../../libssh2-1.4.3
endif
# Edit the path below to point to the base of your librtmp package.
ifndef LIBRTMP_PATH
LIBRTMP_PATH = ../../../librtmp-2.3
endif
# Edit the path below to point to the base of your libidn package.
ifndef LIBIDN_PATH
LIBIDN_PATH = ../../../libidn-1.18
endif
# Edit the path below to point to the base of your MS IDN package.
# Microsoft Internationalized Domain Names (IDN) Mitigation APIs 1.1
# http://www.microsoft.com/downloads/en/details.aspx?FamilyID=ad6158d7-ddba-416a-9109-07607425a815
ifndef WINIDN_PATH
WINIDN_PATH = ../../../Microsoft IDN Mitigation APIs
LIBSSH2_PATH = ../../libssh2-0.18
endif
# Edit the path below to point to the base of your Novell LDAP NDK.
ifndef LDAP_SDK
@ -64,108 +31,25 @@ LDAP_SDK = c:/novell/ndk/cldapsdk/win32
endif
PROOT = ../..
ARES_LIB = $(PROOT)/ares
# Edit the path below to point to the base of your c-ares package.
ifndef LIBCARES_PATH
LIBCARES_PATH = $(PROOT)/ares
endif
SSL = 1
ZLIB = 1
# Edit the var below to set to your architecture or set environment var.
ifndef ARCH
ARCH = w32
endif
CC = $(CROSSPREFIX)gcc
CFLAGS = -g -O2 -Wall
CFLAGS += -fno-strict-aliasing
ifeq ($(ARCH),w64)
CFLAGS += -D_AMD64_
endif
CC = gcc
CFLAGS = -g -O2 -Wall
# comment LDFLAGS below to keep debug info
LDFLAGS = -s
RC = $(CROSSPREFIX)windres
RCFLAGS = --include-dir=$(PROOT)/include -O COFF -i
# Platform-dependent helper tool macros
ifeq ($(findstring /sh,$(SHELL)),/sh)
DEL = rm -f $1
RMDIR = rm -fr $1
MKDIR = mkdir -p $1
COPY = -cp -afv $1 $2
#COPYR = -cp -afr $1/* $2
COPYR = -rsync -aC $1/* $2
TOUCH = touch $1
CAT = cat
ECHONL = echo ""
DL = '
else
ifeq "$(OS)" "Windows_NT"
DEL = -del 2>NUL /q /f $(subst /,\,$1)
RMDIR = -rd 2>NUL /q /s $(subst /,\,$1)
else
DEL = -del 2>NUL $(subst /,\,$1)
RMDIR = -deltree 2>NUL /y $(subst /,\,$1)
endif
MKDIR = -md 2>NUL $(subst /,\,$1)
COPY = -copy 2>NUL /y $(subst /,\,$1) $(subst /,\,$2)
COPYR = -xcopy 2>NUL /q /y /e $(subst /,\,$1) $(subst /,\,$2)
TOUCH = copy 2>&1>NUL /b $(subst /,\,$1) +,,
CAT = type
ECHONL = $(ComSpec) /c echo.
endif
LDFLAGS = -s
RC = windres
RCFLAGS = --include-dir=$(PROOT)/include -O COFF -i
RM = del /q /f > NUL 2>&1
CP = copy
########################################################
## Nothing more to do below this line!
ifeq ($(findstring -dyn,$(CFG)),-dyn)
DYN = 1
endif
ifeq ($(findstring -ares,$(CFG)),-ares)
ARES = 1
endif
ifeq ($(findstring -rtmp,$(CFG)),-rtmp)
RTMP = 1
SSL = 1
ZLIB = 1
endif
ifeq ($(findstring -ssh2,$(CFG)),-ssh2)
SSH2 = 1
SSL = 1
ZLIB = 1
endif
ifeq ($(findstring -ssl,$(CFG)),-ssl)
SSL = 1
endif
ifeq ($(findstring -zlib,$(CFG)),-zlib)
ZLIB = 1
endif
ifeq ($(findstring -idn,$(CFG)),-idn)
IDN = 1
endif
ifeq ($(findstring -winidn,$(CFG)),-winidn)
WINIDN = 1
endif
ifeq ($(findstring -sspi,$(CFG)),-sspi)
SSPI = 1
endif
ifeq ($(findstring -spnego,$(CFG)),-spnego)
SPNEGO = 1
endif
ifeq ($(findstring -ldaps,$(CFG)),-ldaps)
LDAPS = 1
endif
ifeq ($(findstring -ipv6,$(CFG)),-ipv6)
IPV6 = 1
endif
ifeq ($(findstring -metalink,$(CFG)),-metalink)
METALINK = 1
endif
ifeq ($(findstring -winssl,$(CFG)),-winssl)
SCHANNEL = 1
SSPI = 1
endif
INCLUDES = -I. -I$(PROOT) -I$(PROOT)/include -I$(PROOT)/lib
LINK = $(CC) $(LDFLAGS) -o $@
ifdef DYN
curl_DEPENDENCIES = $(PROOT)/lib/libcurldll.a $(PROOT)/lib/libcurl.dll
@ -174,67 +58,37 @@ else
curl_DEPENDENCIES = $(PROOT)/lib/libcurl.a
curl_LDADD = -L$(PROOT)/lib -lcurl
CFLAGS += -DCURL_STATICLIB
LDFLAGS += -static
endif
ifdef ARES
ifndef DYN
curl_DEPENDENCIES += $(LIBCARES_PATH)/libcares.a
curl_DEPENDENCIES += $(ARES_LIB)/libcares.a
endif
CFLAGS += -DUSE_ARES
curl_LDADD += -L"$(LIBCARES_PATH)" -lcares
endif
ifdef RTMP
CFLAGS += -DUSE_LIBRTMP
curl_LDADD += -L"$(LIBRTMP_PATH)/librtmp" -lrtmp -lwinmm
curl_LDADD += -L$(ARES_LIB) -lcares
endif
ifdef SSH2
CFLAGS += -DUSE_LIBSSH2 -DHAVE_LIBSSH2_H
curl_LDADD += -L"$(LIBSSH2_PATH)/win32" -lssh2
curl_LDADD += -L$(LIBSSH2_PATH)/win32 -lssh2
endif
ifdef SSL
ifndef OPENSSL_LIBPATH
OPENSSL_LIBS = -lssl -lcrypto
ifeq "$(wildcard $(OPENSSL_PATH)/out)" "$(OPENSSL_PATH)/out"
OPENSSL_LIBPATH = $(OPENSSL_PATH)/out
ifdef DYN
OPENSSL_LIBS = -lssl32 -leay32
endif
endif
ifeq "$(wildcard $(OPENSSL_PATH)/lib)" "$(OPENSSL_PATH)/lib"
OPENSSL_LIBPATH = $(OPENSSL_PATH)/lib
endif
INCLUDES += -I"$(OPENSSL_PATH)/outinc"
CFLAGS += -DUSE_SSLEAY -DHAVE_OPENSSL_ENGINE_H
ifdef DYN
curl_LDADD += -L$(OPENSSL_PATH)/out -leay32 -lssl32
else
curl_LDADD += -L$(OPENSSL_PATH)/out -lssl -lcrypto -lgdi32
endif
ifndef DYN
OPENSSL_LIBS += -lgdi32 -lcrypt32
endif
CFLAGS += -DUSE_SSLEAY
curl_LDADD += -L"$(OPENSSL_LIBPATH)" $(OPENSSL_LIBS)
endif
ifdef ZLIB
INCLUDES += -I"$(ZLIB_PATH)"
CFLAGS += -DHAVE_LIBZ -DHAVE_ZLIB_H
curl_LDADD += -L"$(ZLIB_PATH)" -lz
endif
ifdef IDN
CFLAGS += -DUSE_LIBIDN
curl_LDADD += -L"$(LIBIDN_PATH)/lib" -lidn
else
ifdef WINIDN
CFLAGS += -DUSE_WIN32_IDN
curl_LDADD += -L"$(WINIDN_PATH)" -lnormaliz
endif
curl_LDADD += -L$(ZLIB_PATH) -lz
endif
ifdef SSPI
CFLAGS += -DUSE_WINDOWS_SSPI
ifdef SCHANNEL
CFLAGS += -DUSE_SCHANNEL
endif
endif
ifdef SPNEGO
CFLAGS += -DHAVE_SPNEGO
endif
ifdef IPV6
CFLAGS += -DENABLE_IPV6 -D_WIN32_WINNT=0x0501
CFLAGS += -DENABLE_IPV6
endif
ifdef LDAPS
CFLAGS += -DHAVE_LDAP_SSL
@ -249,34 +103,32 @@ ifdef USE_LDAP_OPENLDAP
endif
ifndef USE_LDAP_NOVELL
ifndef USE_LDAP_OPENLDAP
curl_LDADD += -lwldap32
curl_LDADD += -lwldap32
endif
endif
curl_LDADD += -lws2_32
COMPILE = $(CC) $(INCLUDES) $(CFLAGS)
# Makefile.inc provides the check_PROGRAMS and COMPLICATED_EXAMPLES defines
include Makefile.inc
check_PROGRAMS := $(patsubst %,%.exe,$(strip $(check_PROGRAMS)))
check_PROGRAMS += ftpuploadresume.exe synctime.exe
example_PROGRAMS := $(patsubst %,%.exe,$(strip $(check_PROGRAMS)))
.PRECIOUS: %.o
.SUFFIXES: .rc .res .o .exe
all: $(check_PROGRAMS)
all: $(example_PROGRAMS)
%.exe: %.o $(curl_DEPENDENCIES)
$(CC) $(LDFLAGS) -o $@ $< $(curl_LDADD)
.o.exe: $(curl_DEPENDENCIES)
$(LINK) $< $(curl_LDADD)
%.o: %.c
$(CC) $(INCLUDES) $(CFLAGS) -c $<
.c.o:
$(COMPILE) -c $<
%.res: %.rc
.rc.res:
$(RC) $(RCFLAGS) $< -o $@
clean:
@$(call DEL, $(check_PROGRAMS:.exe=.o))
$(RM) $(example_PROGRAMS)
distclean vclean: clean
@$(call DEL, $(check_PROGRAMS))


@ -1,441 +0,0 @@
#################################################################
#
## Makefile for building curl.nlm (NetWare version - gnu make)
## Use: make -f Makefile.netware
##
## Comments to: Guenter Knauf http://www.gknw.net/phpbb
#
#################################################################
# Edit the path below to point to the base of your Novell NDK.
ifndef NDKBASE
NDKBASE = c:/novell
endif
# Edit the path below to point to the base of your Zlib sources.
ifndef ZLIB_PATH
ZLIB_PATH = ../../../zlib-1.2.7
endif
# Edit the path below to point to the base of your OpenSSL package.
ifndef OPENSSL_PATH
OPENSSL_PATH = ../../../openssl-0.9.8x
endif
# Edit the path below to point to the base of your LibSSH2 package.
ifndef LIBSSH2_PATH
LIBSSH2_PATH = ../../../libssh2-1.4.3
endif
# Edit the path below to point to the base of your axTLS package.
ifndef AXTLS_PATH
AXTLS_PATH = ../../../axTLS-1.2.7
endif
# Edit the path below to point to the base of your libidn package.
ifndef LIBIDN_PATH
LIBIDN_PATH = ../../../libidn-1.18
endif
# Edit the path below to point to the base of your librtmp package.
ifndef LIBRTMP_PATH
LIBRTMP_PATH = ../../../librtmp-2.3
endif
# Edit the path below to point to the base of your fbopenssl package.
ifndef FBOPENSSL_PATH
FBOPENSSL_PATH = ../../fbopenssl-0.4
endif
# Edit the path below to point to the base of your c-ares package.
ifndef LIBCARES_PATH
LIBCARES_PATH = ../../ares
endif
ifndef INSTDIR
INSTDIR = ..$(DS)..$(DS)curl-$(LIBCURL_VERSION_STR)-bin-nw
endif
# Edit the vars below to change NLM target settings.
TARGET = examples
VERSION = $(LIBCURL_VERSION)
COPYR = Copyright (C) $(LIBCURL_COPYRIGHT_STR)
DESCR = cURL ($(LIBARCH))
MTSAFE = YES
STACK = 8192
SCREEN = Example Program
# Comment the line below if you dont want to load protected automatically.
# LDRING = 3
# Uncomment the next line to enable linking with POSIX semantics.
# POSIXFL = 1
# Edit the var below to point to your lib architecture.
ifndef LIBARCH
LIBARCH = LIBC
endif
# must be equal to NDEBUG or DEBUG, CURLDEBUG
ifndef DB
DB = NDEBUG
endif
# Optimization: -O<n> or debugging: -g
ifeq ($(DB),NDEBUG)
OPT = -O2
OBJDIR = release
else
OPT = -g
OBJDIR = debug
endif
# The following lines defines your compiler.
ifdef CWFolder
METROWERKS = $(CWFolder)
endif
ifdef METROWERKS
# MWCW_PATH = $(subst \,/,$(METROWERKS))/Novell Support
MWCW_PATH = $(subst \,/,$(METROWERKS))/Novell Support/Metrowerks Support
CC = mwccnlm
else
CC = gcc
endif
PERL = perl
# Here you can find a native Win32 binary of the original awk:
# http://www.gknw.net/development/prgtools/awk-20100523.zip
AWK = awk
CP = cp -afv
MKDIR = mkdir
# RM = rm -f
# If you want to mark the target as MTSAFE you will need a tool for
# generating the xdc data for the linker; here's a minimal tool:
# http://www.gknw.net/development/prgtools/mkxdc.zip
MPKXDC = mkxdc
# LIBARCH_U = $(shell $(AWK) 'BEGIN {print toupper(ARGV[1])}' $(LIBARCH))
LIBARCH_L = $(shell $(AWK) 'BEGIN {print tolower(ARGV[1])}' $(LIBARCH))
# Include the version info retrieved from curlver.h
-include $(OBJDIR)/version.inc
# Global flags for all compilers
CFLAGS += $(OPT) -D$(DB) -DNETWARE -DHAVE_CONFIG_H -nostdinc
ifeq ($(CC),mwccnlm)
LD = mwldnlm
LDFLAGS = -nostdlib $< $(PRELUDE) $(LDLIBS) -o $@ -commandfile
LIBEXT = lib
CFLAGS += -gccinc -inline off -opt nointrinsics -proc 586
CFLAGS += -relax_pointers
#CFLAGS += -w on
ifeq ($(LIBARCH),LIBC)
ifeq ($(POSIXFL),1)
PRELUDE = $(NDK_LIBC)/imports/posixpre.o
else
PRELUDE = $(NDK_LIBC)/imports/libcpre.o
endif
CFLAGS += -align 4
else
# PRELUDE = $(NDK_CLIB)/imports/clibpre.o
# to avoid the __init_* / __deinit_* woes, don't use prelude from NDK
PRELUDE = "$(MWCW_PATH)/libraries/runtime/prelude.obj"
# CFLAGS += -include "$(MWCW_PATH)/headers/nlm_clib_prefix.h"
CFLAGS += -align 1
endif
else
LD = nlmconv
LDFLAGS = -T
LIBEXT = a
CFLAGS += -m32
CFLAGS += -fno-builtin -fno-strict-aliasing
ifeq ($(findstring gcc,$(CC)),gcc)
CFLAGS += -fpcc-struct-return
endif
CFLAGS += -Wall # -pedantic
ifeq ($(LIBARCH),LIBC)
ifeq ($(POSIXFL),1)
PRELUDE = $(NDK_LIBC)/imports/posixpre.gcc.o
else
PRELUDE = $(NDK_LIBC)/imports/libcpre.gcc.o
endif
else
# PRELUDE = $(NDK_CLIB)/imports/clibpre.gcc.o
# to avoid the __init_* / __deinit_* woes, don't use prelude from NDK
# http://www.gknw.net/development/mk_nlm/gcc_pre.zip
PRELUDE = $(NDK_ROOT)/pre/prelude.o
CFLAGS += -include $(NDKBASE)/nlmconv/genlm.h
endif
endif
NDK_ROOT = $(NDKBASE)/ndk
ifndef NDK_CLIB
NDK_CLIB = $(NDK_ROOT)/nwsdk
endif
ifndef NDK_LIBC
NDK_LIBC = $(NDK_ROOT)/libc
endif
ifndef NDK_LDAP
NDK_LDAP = $(NDK_ROOT)/cldapsdk/netware
endif
CURL_INC = ../../include
CURL_LIB = ../../lib
INCLUDES = -I$(CURL_INC)
ifeq ($(findstring -static,$(CFG)),-static)
LINK_STATIC = 1
endif
ifeq ($(findstring -ares,$(CFG)),-ares)
WITH_ARES = 1
endif
ifeq ($(findstring -rtmp,$(CFG)),-rtmp)
WITH_RTMP = 1
WITH_SSL = 1
WITH_ZLIB = 1
endif
ifeq ($(findstring -ssh2,$(CFG)),-ssh2)
WITH_SSH2 = 1
WITH_SSL = 1
WITH_ZLIB = 1
endif
ifeq ($(findstring -axtls,$(CFG)),-axtls)
WITH_AXTLS = 1
WITH_SSL =
else
ifeq ($(findstring -ssl,$(CFG)),-ssl)
WITH_SSL = 1
endif
endif
ifeq ($(findstring -zlib,$(CFG)),-zlib)
WITH_ZLIB = 1
endif
ifeq ($(findstring -idn,$(CFG)),-idn)
WITH_IDN = 1
endif
ifeq ($(findstring -spnego,$(CFG)),-spnego)
WITH_SPNEGO = 1
endif
ifeq ($(findstring -ipv6,$(CFG)),-ipv6)
ENABLE_IPV6 = 1
endif
ifdef LINK_STATIC
LDLIBS = $(CURL_LIB)/libcurl.$(LIBEXT)
ifdef WITH_ARES
LDLIBS += $(LIBCARES_PATH)/libcares.$(LIBEXT)
endif
else
MODULES = libcurl.nlm
IMPORTS = @$(CURL_LIB)/libcurl.imp
endif
ifdef WITH_SSH2
# INCLUDES += -I$(LIBSSH2_PATH)/include
ifdef LINK_STATIC
LDLIBS += $(LIBSSH2_PATH)/nw/libssh2.$(LIBEXT)
else
MODULES += libssh2.nlm
IMPORTS += @$(LIBSSH2_PATH)/nw/libssh2.imp
endif
endif
ifdef WITH_RTMP
# INCLUDES += -I$(LIBRTMP_PATH)
ifdef LINK_STATIC
LDLIBS += $(LIBRTMP_PATH)/librtmp/librtmp.$(LIBEXT)
endif
endif
ifdef WITH_SSL
INCLUDES += -I$(OPENSSL_PATH)/outinc_nw_$(LIBARCH_L)
LDLIBS += $(OPENSSL_PATH)/out_nw_$(LIBARCH_L)/ssl.$(LIBEXT)
LDLIBS += $(OPENSSL_PATH)/out_nw_$(LIBARCH_L)/crypto.$(LIBEXT)
IMPORTS += GetProcessSwitchCount RunningProcess
ifdef WITH_SPNEGO
# INCLUDES += -I$(FBOPENSSL_PATH)/include
LDLIBS += $(FBOPENSSL_PATH)/nw/fbopenssl.$(LIBEXT)
endif
else
ifdef WITH_AXTLS
INCLUDES += -I$(AXTLS_PATH)/inc
ifdef LINK_STATIC
LDLIBS += $(AXTLS_PATH)/lib/libaxtls.$(LIBEXT)
else
MODULES += libaxtls.nlm
IMPORTS += $(AXTLS_PATH)/lib/libaxtls.imp
endif
endif
endif
ifdef WITH_ZLIB
# INCLUDES += -I$(ZLIB_PATH)
ifdef LINK_STATIC
LDLIBS += $(ZLIB_PATH)/nw/$(LIBARCH)/libz.$(LIBEXT)
else
MODULES += libz.nlm
IMPORTS += @$(ZLIB_PATH)/nw/$(LIBARCH)/libz.imp
endif
endif
ifdef WITH_IDN
# INCLUDES += -I$(LIBIDN_PATH)/include
LDLIBS += $(LIBIDN_PATH)/lib/libidn.$(LIBEXT)
endif
ifeq ($(LIBARCH),LIBC)
INCLUDES += -I$(NDK_LIBC)/include
# INCLUDES += -I$(NDK_LIBC)/include/nks
# INCLUDES += -I$(NDK_LIBC)/include/winsock
CFLAGS += -D_POSIX_SOURCE
else
INCLUDES += -I$(NDK_CLIB)/include/nlm
# INCLUDES += -I$(NDK_CLIB)/include
endif
ifndef DISABLE_LDAP
# INCLUDES += -I$(NDK_LDAP)/$(LIBARCH_L)/inc
endif
CFLAGS += $(INCLUDES)
ifeq ($(MTSAFE),YES)
XDCOPT = -n
endif
ifeq ($(MTSAFE),NO)
XDCOPT = -u
endif
ifdef XDCOPT
XDCDATA = $(OBJDIR)/$(TARGET).xdc
endif
ifeq ($(findstring /sh,$(SHELL)),/sh)
DL = '
DS = /
PCT = %
#-include $(NDKBASE)/nlmconv/ncpfs.inc
else
DS = \\
PCT = %%
endif
# Makefile.inc provides the CSOURCES and HHEADERS defines
include Makefile.inc
check_PROGRAMS := $(patsubst %,%.nlm,$(strip $(check_PROGRAMS)))
.PRECIOUS: $(OBJDIR)/%.o $(OBJDIR)/%.def $(OBJDIR)/%.xdc
all: prebuild $(check_PROGRAMS)
prebuild: $(OBJDIR) $(OBJDIR)/version.inc
$(OBJDIR)/%.o: %.c
@echo Compiling $<
$(CC) $(CFLAGS) -c $< -o $@
$(OBJDIR)/version.inc: $(CURL_INC)/curl/curlver.h $(OBJDIR)
@echo Creating $@
@$(AWK) -f ../../packages/NetWare/get_ver.awk $< > $@
install: $(INSTDIR) all
@$(CP) $(check_PROGRAMS) $(INSTDIR)
clean:
-$(RM) -r $(OBJDIR)
distclean vclean: clean
-$(RM) $(check_PROGRAMS)
$(OBJDIR) $(INSTDIR):
@$(MKDIR) $@
%.nlm: $(OBJDIR)/%.o $(OBJDIR)/%.def $(XDCDATA)
@echo Linking $@
@-$(RM) $@
@$(LD) $(LDFLAGS) $(OBJDIR)/$(@:.nlm=.def)
$(OBJDIR)/%.xdc: Makefile.netware
@echo Creating $@
@$(MPKXDC) $(XDCOPT) $@
$(OBJDIR)/%.def: Makefile.netware
@echo $(DL)# DEF file for linking with $(LD)$(DL) > $@
@echo $(DL)# Do not edit this file - it is created by Make!$(DL) >> $@
@echo $(DL)# All your changes will be lost!!$(DL) >> $@
@echo $(DL)#$(DL) >> $@
@echo $(DL)copyright "$(COPYR)"$(DL) >> $@
@echo $(DL)description "$(DESCR) $(notdir $(@:.def=)) Example"$(DL) >> $@
@echo $(DL)version $(VERSION)$(DL) >> $@
ifdef NLMTYPE
@echo $(DL)type $(NLMTYPE)$(DL) >> $@
endif
ifdef STACK
@echo $(DL)stack $(STACK)$(DL) >> $@
endif
ifdef SCREEN
@echo $(DL)screenname "$(DESCR) $(notdir $(@:.def=)) $(SCREEN)"$(DL) >> $@
else
@echo $(DL)screenname "DEFAULT"$(DL) >> $@
endif
ifneq ($(DB),NDEBUG)
@echo $(DL)debug$(DL) >> $@
endif
@echo $(DL)threadname "_$(notdir $(@:.def=))"$(DL) >> $@
ifdef XDCDATA
@echo $(DL)xdcdata $(XDCDATA)$(DL) >> $@
endif
ifeq ($(LDRING),0)
@echo $(DL)flag_on 16$(DL) >> $@
endif
ifeq ($(LDRING),3)
@echo $(DL)flag_on 512$(DL) >> $@
endif
ifeq ($(LIBARCH),CLIB)
@echo $(DL)start _Prelude$(DL) >> $@
@echo $(DL)exit _Stop$(DL) >> $@
@echo $(DL)import @$(NDK_CLIB)/imports/clib.imp$(DL) >> $@
@echo $(DL)import @$(NDK_CLIB)/imports/threads.imp$(DL) >> $@
@echo $(DL)import @$(NDK_CLIB)/imports/nlmlib.imp$(DL) >> $@
@echo $(DL)import @$(NDK_CLIB)/imports/socklib.imp$(DL) >> $@
@echo $(DL)module clib$(DL) >> $@
ifndef DISABLE_LDAP
@echo $(DL)import @$(NDK_LDAP)/clib/imports/ldapsdk.imp$(DL) >> $@
@echo $(DL)import @$(NDK_LDAP)/clib/imports/ldapssl.imp$(DL) >> $@
# @echo $(DL)import @$(NDK_LDAP)/clib/imports/ldapx.imp$(DL) >> $@
@echo $(DL)module ldapsdk ldapssl$(DL) >> $@
endif
else
ifeq ($(POSIXFL),1)
@echo $(DL)flag_on 4194304$(DL) >> $@
endif
@echo $(DL)flag_on 64$(DL) >> $@
@echo $(DL)pseudopreemption$(DL) >> $@
ifeq ($(findstring posixpre,$(PRELUDE)),posixpre)
@echo $(DL)start POSIX_Start$(DL) >> $@
@echo $(DL)exit POSIX_Stop$(DL) >> $@
@echo $(DL)check POSIX_CheckUnload$(DL) >> $@
else
@echo $(DL)start _LibCPrelude$(DL) >> $@
@echo $(DL)exit _LibCPostlude$(DL) >> $@
@echo $(DL)check _LibCCheckUnload$(DL) >> $@
endif
@echo $(DL)import @$(NDK_LIBC)/imports/libc.imp$(DL) >> $@
@echo $(DL)import @$(NDK_LIBC)/imports/netware.imp$(DL) >> $@
@echo $(DL)module libc$(DL) >> $@
ifndef DISABLE_LDAP
@echo $(DL)import @$(NDK_LDAP)/libc/imports/lldapsdk.imp$(DL) >> $@
@echo $(DL)import @$(NDK_LDAP)/libc/imports/lldapssl.imp$(DL) >> $@
# @echo $(DL)import @$(NDK_LDAP)/libc/imports/lldapx.imp$(DL) >> $@
@echo $(DL)module lldapsdk lldapssl$(DL) >> $@
endif
endif
ifdef MODULES
@echo $(DL)module $(MODULES)$(DL) >> $@
endif
ifdef EXPORTS
@echo $(DL)export $(EXPORTS)$(DL) >> $@
endif
ifdef IMPORTS
@echo $(DL)import $(IMPORTS)$(DL) >> $@
endif
ifeq ($(findstring nlmconv,$(LD)),nlmconv)
@echo $(DL)input $(PRELUDE)$(DL) >> $@
@echo $(DL)input $(@:.def=.o)$(DL) >> $@
ifdef LDLIBS
@echo $(DL)input $(LDLIBS)$(DL) >> $@
endif
@echo $(DL)output $(notdir $(@:.def=.nlm))$(DL) >> $@
endif


@ -55,7 +55,6 @@ htmltitle.cc - download a HTML file and extract the <title> tag from a HTML
http-post.c - HTTP POST
httpput.c - HTTP PUT a local file
https.c - simple HTTPS transfer
imap.c - simple IMAP transfer
multi-app.c - a multi-interface app
multi-debugcallback.c - a multi-interface app using the debug callback
multi-double.c - a multi-interface app doing two simultaneous transfers
@ -64,8 +63,6 @@ multi-single.c - a multi-interface app getting a single file
multithread.c - an example using multi-threading, transferring multiple files
opensslthreadlock.c - show how to do locking when using OpenSSL multi-threaded
persistant.c - request two URLs with a persistent connection
pop3s.c - POP3S transfer
pop3slist.c - POP3S LIST
post-callback.c - send a HTTP POST using a callback
postit2.c - send a HTTP multipart formpost
sampleconv.c - showing how a program on a non-ASCII platform would invoke
@ -76,5 +73,4 @@ simple.c - the most simple download a URL source
simplepost.c - HTTP POST
simplessl.c - HTTPS example with certificates many options set
synctime.c - Sync local time by extracting date from remote HTTP servers
url2file.c - download a document and store it in a file
10-at-a-time.c - Download many files simultaneously, 10 at a time.


@ -1,35 +1,19 @@
/***************************************************************************
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2012, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
* $Id: anyauthput.c,v 1.7 2008-08-31 12:12:35 yangtse Exp $
*/
#include <stdio.h>
#include <fcntl.h>
#ifdef WIN32
# include <io.h>
#else
# ifdef __VMS
typedef int intptr_t;
# endif
# if !defined(_AIX) && !defined(__sgi) && !defined(__osf__)
# include <stdint.h>
# endif
# include <stdint.h>
# include <unistd.h>
#endif
#include <sys/types.h>
@ -53,12 +37,6 @@
#define TRUE 1
#endif
#if defined(_AIX) || defined(__sgi) || defined(__osf__)
#ifndef intptr_t
#define intptr_t long
#endif
#endif
/*
* This example shows an HTTP PUT operation with authentication using "any"
* type. It PUTs a file given as a command line argument to the URL also given
@ -96,16 +74,12 @@ static curlioerr my_ioctl(CURL *handle, curliocmd cmd, void *userp)
static size_t read_callback(void *ptr, size_t size, size_t nmemb, void *stream)
{
size_t retcode;
curl_off_t nread;
intptr_t fd = (intptr_t)stream;
retcode = read(fd, ptr, size * nmemb);
nread = (curl_off_t)retcode;
fprintf(stderr, "*** We read %" CURL_FORMAT_CURL_OFF_T
" bytes from file\n", nread);
fprintf(stderr, "*** We read %d bytes from file\n", retcode);
return retcode;
}
@ -158,7 +132,7 @@ int main(int argc, char **argv)
/* and give the size of the upload, this supports large file sizes
on systems that have general support for it */
curl_easy_setopt(curl, CURLOPT_INFILESIZE_LARGE,
(curl_off_t)file_info.st_size);
(curl_off_t)file_info.st_size);
/* tell libcurl we can use "any" auth, which lets the lib pick one, but it
also costs one extra round-trip and possibly sending of all the PUT
@ -170,10 +144,6 @@ int main(int argc, char **argv)
/* Now run off and do what you've been told! */
res = curl_easy_perform(curl);
/* Check for errors */
if(res != CURLE_OK)
fprintf(stderr, "curl_easy_perform() failed: %s\n",
curl_easy_strerror(res));
/* always cleanup */
curl_easy_cleanup(curl);
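
To make the fragments above easier to follow, here is a hedged sketch of the
same idea in one piece: upload (PUT) a local file and let libcurl negotiate
whichever authentication scheme the server offers via CURLAUTH_ANY. The file
name, URL and credentials are placeholders, and error checking as well as
CURLOPT_INFILESIZE are left out for brevity:

#include <stdio.h>
#include <curl/curl.h>

static size_t read_cb(void *ptr, size_t size, size_t nmemb, void *stream)
{
  /* libcurl calls this with size == 1, so items read == bytes read */
  return fread(ptr, size, nmemb, (FILE *)stream);
}

int main(void)
{
  CURL *curl = curl_easy_init();
  FILE *src = fopen("body.txt", "rb");   /* placeholder file name */
  if(curl && src) {
    curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/upload");
    curl_easy_setopt(curl, CURLOPT_UPLOAD, 1L);        /* PUT for HTTP */
    curl_easy_setopt(curl, CURLOPT_READFUNCTION, read_cb);
    curl_easy_setopt(curl, CURLOPT_READDATA, src);
    curl_easy_setopt(curl, CURLOPT_USERPWD, "user:pass");
    /* let libcurl pick whichever auth method the server accepts */
    curl_easy_setopt(curl, CURLOPT_HTTPAUTH, (long)CURLAUTH_ANY);
    curl_easy_perform(curl);
    curl_easy_cleanup(curl);
  }
  if(src)
    fclose(src);
  return 0;
}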


@ -1,454 +0,0 @@
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 2012, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
/*
* file: asiohiper.cpp
* Example program to demonstrate the use of multi socket interface
* with boost::asio
*
* This program is in c++ and uses boost::asio instead of libevent/libev.
* Requires boost::asio, boost::bind and boost::system
*
* This is an adaptation of libcurl's "hiperfifo.c" and "evhiperfifo.c"
* sample programs. This example implements a subset of the functionality from
* hiperfifo.c, for full functionality refer hiperfifo.c or evhiperfifo.c
*
* Written by Lijo Antony based on hiperfifo.c by Jeff Pohlmeyer
*
* When running, the program creates an easy handle for a URL and
* uses the curl_multi API to fetch it.
*
* Note:
* For the sake of simplicity, URL is hard coded to "www.google.com"
*
* This is purely a demo app, all retrieved data is simply discarded by the write
* callback.
*/
#include <curl/curl.h>
#include <boost/asio.hpp>
#include <boost/bind.hpp>
#define MSG_OUT stdout /* Send info to stdout, change to stderr if you want */
/* boost::asio related objects
* using global variables for simplicity
*/
boost::asio::io_service io_service;
boost::asio::deadline_timer timer(io_service);
std::map<curl_socket_t, boost::asio::ip::tcp::socket *> socket_map;
/* Global information, common to all connections */
typedef struct _GlobalInfo
{
CURLM *multi;
int still_running;
} GlobalInfo;
/* Information associated with a specific easy handle */
typedef struct _ConnInfo
{
CURL *easy;
char *url;
GlobalInfo *global;
char error[CURL_ERROR_SIZE];
} ConnInfo;
static void timer_cb(const boost::system::error_code & error, GlobalInfo *g);
/* Update the event timer after curl_multi library calls */
static int multi_timer_cb(CURLM *multi, long timeout_ms, GlobalInfo *g)
{
fprintf(MSG_OUT, "\nmulti_timer_cb: timeout_ms %ld", timeout_ms);
/* cancel running timer */
timer.cancel();
if ( timeout_ms > 0 )
{
/* update timer */
timer.expires_from_now(boost::posix_time::millisec(timeout_ms));
timer.async_wait(boost::bind(&timer_cb, _1, g));
}
else
{
/* call timeout function immediately */
boost::system::error_code error; /*success*/
timer_cb(error, g);
}
return 0;
}
/* Die if we get a bad CURLMcode somewhere */
static void mcode_or_die(const char *where, CURLMcode code)
{
if ( CURLM_OK != code )
{
const char *s;
switch ( code )
{
case CURLM_CALL_MULTI_PERFORM: s="CURLM_CALL_MULTI_PERFORM"; break;
case CURLM_BAD_HANDLE: s="CURLM_BAD_HANDLE"; break;
case CURLM_BAD_EASY_HANDLE: s="CURLM_BAD_EASY_HANDLE"; break;
case CURLM_OUT_OF_MEMORY: s="CURLM_OUT_OF_MEMORY"; break;
case CURLM_INTERNAL_ERROR: s="CURLM_INTERNAL_ERROR"; break;
case CURLM_UNKNOWN_OPTION: s="CURLM_UNKNOWN_OPTION"; break;
case CURLM_LAST: s="CURLM_LAST"; break;
default: s="CURLM_unknown";
break;
case CURLM_BAD_SOCKET: s="CURLM_BAD_SOCKET";
fprintf(MSG_OUT, "\nERROR: %s returns %s", where, s);
/* ignore this error */
return;
}
fprintf(MSG_OUT, "\nERROR: %s returns %s", where, s);
exit(code);
}
}
/* Check for completed transfers, and remove their easy handles */
static void check_multi_info(GlobalInfo *g)
{
char *eff_url;
CURLMsg *msg;
int msgs_left;
ConnInfo *conn;
CURL *easy;
CURLcode res;
fprintf(MSG_OUT, "\nREMAINING: %d", g->still_running);
while ((msg = curl_multi_info_read(g->multi, &msgs_left)))
{
if (msg->msg == CURLMSG_DONE)
{
easy = msg->easy_handle;
res = msg->data.result;
curl_easy_getinfo(easy, CURLINFO_PRIVATE, &conn);
curl_easy_getinfo(easy, CURLINFO_EFFECTIVE_URL, &eff_url);
fprintf(MSG_OUT, "\nDONE: %s => (%d) %s", eff_url, res, conn->error);
curl_multi_remove_handle(g->multi, easy);
free(conn->url);
curl_easy_cleanup(easy);
free(conn);
}
}
}
/* Called by asio when there is an action on a socket */
static void event_cb(GlobalInfo * g, boost::asio::ip::tcp::socket * tcp_socket, int action)
{
fprintf(MSG_OUT, "\nevent_cb: action=%d", action);
CURLMcode rc;
rc = curl_multi_socket_action(g->multi, tcp_socket->native_handle(), action, &g->still_running);
mcode_or_die("event_cb: curl_multi_socket_action", rc);
check_multi_info(g);
if ( g->still_running <= 0 )
{
fprintf(MSG_OUT, "\nlast transfer done, kill timeout");
timer.cancel();
}
}
/* Called by asio when our timeout expires */
static void timer_cb(const boost::system::error_code & error, GlobalInfo *g)
{
if ( !error)
{
fprintf(MSG_OUT, "\ntimer_cb: ");
CURLMcode rc;
rc = curl_multi_socket_action(g->multi, CURL_SOCKET_TIMEOUT, 0, &g->still_running);
mcode_or_die("timer_cb: curl_multi_socket_action", rc);
check_multi_info(g);
}
}
/* Clean up any data */
static void remsock(int *f, GlobalInfo *g)
{
fprintf(MSG_OUT, "\nremsock: ");
if ( f )
{
free(f);
}
}
static void setsock(int *fdp, curl_socket_t s, CURL*e, int act, GlobalInfo*g)
{
fprintf(MSG_OUT, "\nsetsock: socket=%d, act=%d, fdp=%p", s, act, fdp);
std::map<curl_socket_t, boost::asio::ip::tcp::socket *>::iterator it = socket_map.find(s);
if ( it == socket_map.end() )
{
fprintf(MSG_OUT, "\nsocket %d is a c-ares socket, ignoring", s);
return;
}
boost::asio::ip::tcp::socket * tcp_socket = it->second;
*fdp = act;
if ( act == CURL_POLL_IN )
{
fprintf(MSG_OUT, "\nwatching for socket to become readable");
tcp_socket->async_read_some(boost::asio::null_buffers(),
boost::bind(&event_cb, g,
tcp_socket,
act));
}
else if ( act == CURL_POLL_OUT )
{
fprintf(MSG_OUT, "\nwatching for socket to become writable");
tcp_socket->async_write_some(boost::asio::null_buffers(),
boost::bind(&event_cb, g,
tcp_socket,
act));
}
else if ( act == CURL_POLL_INOUT )
{
fprintf(MSG_OUT, "\nwatching for socket to become readable & writable");
tcp_socket->async_read_some(boost::asio::null_buffers(),
boost::bind(&event_cb, g,
tcp_socket,
act));
tcp_socket->async_write_some(boost::asio::null_buffers(),
boost::bind(&event_cb, g,
tcp_socket,
act));
}
}
static void addsock(curl_socket_t s, CURL *easy, int action, GlobalInfo *g)
{
int *fdp = (int *)calloc(sizeof(int), 1); /* fdp is used to store current action */
setsock(fdp, s, easy, action, g);
curl_multi_assign(g->multi, s, fdp);
}
/* CURLMOPT_SOCKETFUNCTION */
static int sock_cb(CURL *e, curl_socket_t s, int what, void *cbp, void *sockp)
{
fprintf(MSG_OUT, "\nsock_cb: socket=%d, what=%d, sockp=%p", s, what, sockp);
GlobalInfo *g = (GlobalInfo*) cbp;
int *actionp = (int*) sockp;
const char *whatstr[]={ "none", "IN", "OUT", "INOUT", "REMOVE"};
fprintf(MSG_OUT,
"\nsocket callback: s=%d e=%p what=%s ", s, e, whatstr[what]);
if ( what == CURL_POLL_REMOVE )
{
fprintf(MSG_OUT, "\n");
remsock(actionp, g);
}
else
{
if ( !actionp )
{
fprintf(MSG_OUT, "\nAdding data: %s", whatstr[what]);
addsock(s, e, what, g);
}
else
{
fprintf(MSG_OUT,
"\nChanging action from %s to %s",
whatstr[*actionp], whatstr[what]);
setsock(actionp, s, e, what, g);
}
}
return 0;
}
/* CURLOPT_WRITEFUNCTION */
static size_t write_cb(void *ptr, size_t size, size_t nmemb, void *data)
{
size_t written = size * nmemb;
char* pBuffer = (char*)malloc(written + 1);
strncpy(pBuffer, (const char *)ptr, written);
pBuffer [written] = '\0';
fprintf(MSG_OUT, "%s", pBuffer);
free(pBuffer);
return written;
}
/* CURLOPT_PROGRESSFUNCTION */
static int prog_cb (void *p, double dltotal, double dlnow, double ult,
double uln)
{
ConnInfo *conn = (ConnInfo *)p;
(void)ult;
(void)uln;
fprintf(MSG_OUT, "\nProgress: %s (%g/%g)", conn->url, dlnow, dltotal);
fprintf(MSG_OUT, "\nProgress: %s (%g)", conn->url, ult);
return 0;
}
/* CURLOPT_OPENSOCKETFUNCTION */
static curl_socket_t opensocket(void *clientp,
curlsocktype purpose,
struct curl_sockaddr *address)
{
fprintf(MSG_OUT, "\nopensocket :");
curl_socket_t sockfd = CURL_SOCKET_BAD;
/* restrict to ipv4 */
if (purpose == CURLSOCKTYPE_IPCXN && address->family == AF_INET)
{
/* create a tcp socket object */
boost::asio::ip::tcp::socket *tcp_socket = new boost::asio::ip::tcp::socket(io_service);
/* open it and get the native handle*/
boost::system::error_code ec;
tcp_socket->open(boost::asio::ip::tcp::v4(), ec);
if (ec)
{
//An error occurred
std::cout << std::endl << "Couldn't open socket [" << ec << "][" << ec.message() << "]";
fprintf(MSG_OUT, "\nERROR: Returning CURL_SOCKET_BAD to signal error");
}
else
{
sockfd = tcp_socket->native_handle();
fprintf(MSG_OUT, "\nOpened socket %d", sockfd);
/* save it for monitoring */
socket_map.insert(std::pair<curl_socket_t, boost::asio::ip::tcp::socket *>(sockfd, tcp_socket));
}
}
return sockfd;
}
/* CURLOPT_CLOSESOCKETFUNCTION */
static int closesocket(void *clientp, curl_socket_t item)
{
fprintf(MSG_OUT, "\nclosesocket : %d", item);
std::map<curl_socket_t, boost::asio::ip::tcp::socket *>::iterator it = socket_map.find(item);
if ( it != socket_map.end() )
{
delete it->second;
socket_map.erase(it);
}
return 0;
}
/* Create a new easy handle, and add it to the global curl_multi */
static void new_conn(char *url, GlobalInfo *g )
{
ConnInfo *conn;
CURLMcode rc;
conn = (ConnInfo *)calloc(1, sizeof(ConnInfo));
memset(conn, 0, sizeof(ConnInfo));
conn->error[0]='\0';
conn->easy = curl_easy_init();
if ( !conn->easy )
{
fprintf(MSG_OUT, "\ncurl_easy_init() failed, exiting!");
exit(2);
}
conn->global = g;
conn->url = strdup(url);
curl_easy_setopt(conn->easy, CURLOPT_URL, conn->url);
curl_easy_setopt(conn->easy, CURLOPT_WRITEFUNCTION, write_cb);
curl_easy_setopt(conn->easy, CURLOPT_WRITEDATA, &conn);
curl_easy_setopt(conn->easy, CURLOPT_VERBOSE, 1L);
curl_easy_setopt(conn->easy, CURLOPT_ERRORBUFFER, conn->error);
curl_easy_setopt(conn->easy, CURLOPT_PRIVATE, conn);
curl_easy_setopt(conn->easy, CURLOPT_NOPROGRESS, 1L);
curl_easy_setopt(conn->easy, CURLOPT_PROGRESSFUNCTION, prog_cb);
curl_easy_setopt(conn->easy, CURLOPT_PROGRESSDATA, conn);
curl_easy_setopt(conn->easy, CURLOPT_LOW_SPEED_TIME, 3L);
curl_easy_setopt(conn->easy, CURLOPT_LOW_SPEED_LIMIT, 10L);
/* call this function to get a socket */
curl_easy_setopt(conn->easy, CURLOPT_OPENSOCKETFUNCTION, opensocket);
/* call this function to close a socket */
curl_easy_setopt(conn->easy, CURLOPT_CLOSESOCKETFUNCTION, closesocket);
fprintf(MSG_OUT,
"\nAdding easy %p to multi %p (%s)", conn->easy, g->multi, url);
rc = curl_multi_add_handle(g->multi, conn->easy);
mcode_or_die("new_conn: curl_multi_add_handle", rc);
/* note that the add_handle() will set a time-out to trigger very soon so
that the necessary socket_action() call will be called by this app */
}
int main(int argc, char **argv)
{
GlobalInfo g;
CURLMcode rc;
(void)argc;
(void)argv;
memset(&g, 0, sizeof(GlobalInfo));
g.multi = curl_multi_init();
curl_multi_setopt(g.multi, CURLMOPT_SOCKETFUNCTION, sock_cb);
curl_multi_setopt(g.multi, CURLMOPT_SOCKETDATA, &g);
curl_multi_setopt(g.multi, CURLMOPT_TIMERFUNCTION, multi_timer_cb);
curl_multi_setopt(g.multi, CURLMOPT_TIMERDATA, &g);
new_conn((char *)"www.google.com", &g); /* add a URL */
/* enter io_service run loop */
io_service.run();
curl_multi_cleanup(g.multi);
fprintf(MSG_OUT, "\ndone.\n");
return 0;
}


@ -1,25 +1,13 @@
/***************************************************************************
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2011, Daniel Stenberg, <daniel@haxx.se>, et al.
* $Id: cacertinmem.c,v 1.3 2008-05-22 21:20:08 danf Exp $
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
/* Example using a "in core" PEM certificate to retrieve a https page.
* Example using a "in core" PEM certificate to retrieve a https page.
* Written by Theo Borm
*/
@ -124,7 +112,7 @@ int main(void)
rv=curl_easy_setopt(ch,CURLOPT_WRITEHEADER, stderr);
rv=curl_easy_setopt(ch,CURLOPT_SSLCERTTYPE,"PEM");
rv=curl_easy_setopt(ch,CURLOPT_SSL_VERIFYPEER,1L);
rv=curl_easy_setopt(ch, CURLOPT_URL, "https://www.example.com/");
rv=curl_easy_setopt(ch, CURLOPT_URL, "https://www.cacert.org/");
/* first try: retrieve page without cacerts' certificate -> will fail
*/
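
For context, the technique this example demonstrates is feeding a CA
certificate to libcurl from memory through the SSL-context callback instead
of pointing CURLOPT_CAINFO at a file on disk. The following is a minimal,
hedged sketch of that callback, assuming an OpenSSL-backed libcurl; the PEM
blob and URL are placeholders and error handling is trimmed:

#include <stdio.h>
#include <curl/curl.h>
#include <openssl/ssl.h>
#include <openssl/pem.h>

static const char *mypem =   /* placeholder: paste a real PEM certificate */
  "-----BEGIN CERTIFICATE-----\n"
  "...\n"
  "-----END CERTIFICATE-----\n";

static CURLcode sslctx_function(CURL *curl, void *sslctx, void *parm)
{
  BIO *bio = BIO_new_mem_buf((void *)mypem, -1);
  X509 *cert = NULL;
  (void)curl; (void)parm;
  PEM_read_bio_X509(bio, &cert, 0, NULL);
  if(cert)   /* add the in-memory cert to the SSL context's trust store */
    X509_STORE_add_cert(SSL_CTX_get_cert_store((SSL_CTX *)sslctx), cert);
  X509_free(cert);
  BIO_free(bio);
  return CURLE_OK;
}

int main(void)
{
  CURL *ch = curl_easy_init();
  if(ch) {
    curl_easy_setopt(ch, CURLOPT_URL, "https://example.com/");
    curl_easy_setopt(ch, CURLOPT_SSL_VERIFYPEER, 1L);
    curl_easy_setopt(ch, CURLOPT_SSL_CTX_FUNCTION, sslctx_function);
    curl_easy_perform(ch);
    curl_easy_cleanup(ch);
  }
  return 0;
}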


@ -1,36 +1,17 @@
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2011, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
/*****************************************************************************
*/
#include <stdio.h>
#include <curl/curl.h>
#include <curl/types.h>
#include <curl/easy.h>
static size_t wrfu(void *ptr, size_t size, size_t nmemb, void *stream)
{
(void)stream;
(void)ptr;
return size * nmemb;
}
int main(void)
int main(int argc, char **argv)
{
CURL *curl;
CURLcode res;
@ -39,7 +20,7 @@ int main(void)
curl = curl_easy_init();
if(curl) {
curl_easy_setopt(curl, CURLOPT_URL, "https://www.example.com/");
curl_easy_setopt(curl, CURLOPT_URL, "https://www.networking4all.com/");
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, wrfu);
@ -52,24 +33,18 @@ int main(void)
res = curl_easy_perform(curl);
if(!res) {
union {
struct curl_slist *to_info;
struct curl_certinfo *to_certinfo;
} ptr;
struct curl_certinfo *ci = NULL;
ptr.to_info = NULL;
res = curl_easy_getinfo(curl, CURLINFO_CERTINFO, &ci);
res = curl_easy_getinfo(curl, CURLINFO_CERTINFO, &ptr.to_info);
if(!res && ptr.to_info) {
if(!res && ci) {
int i;
printf("%d certs!\n", ci->num_of_certs);
printf("%d certs!\n", ptr.to_certinfo->num_of_certs);
for(i = 0; i < ptr.to_certinfo->num_of_certs; i++) {
for(i=0; i<ci->num_of_certs; i++) {
struct curl_slist *slist;
for(slist = ptr.to_certinfo->certinfo[i]; slist; slist = slist->next)
for(slist = ci->certinfo[i]; slist; slist = slist->next)
printf("%s\n", slist->data);
}
@ -77,6 +52,7 @@ int main(void)
}
curl_easy_cleanup(curl);
}
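
The hunk above shows two equivalent ways of reading the data (a plain
struct curl_certinfo pointer and a union); here is a hedged sketch of the
whole certinfo flow using the plain-pointer form. The URL is a placeholder
and error handling is trimmed:

#include <stdio.h>
#include <curl/curl.h>

int main(void)
{
  CURL *curl = curl_easy_init();
  if(curl) {
    struct curl_certinfo *ci = NULL;
    curl_easy_setopt(curl, CURLOPT_URL, "https://example.com/");
    curl_easy_setopt(curl, CURLOPT_NOBODY, 1L);
    curl_easy_setopt(curl, CURLOPT_CERTINFO, 1L);   /* ask for the chain */
    if(curl_easy_perform(curl) == CURLE_OK &&
       curl_easy_getinfo(curl, CURLINFO_CERTINFO, &ci) == CURLE_OK && ci) {
      int i;
      printf("%d certs!\n", ci->num_of_certs);
      for(i = 0; i < ci->num_of_certs; i++) {
        struct curl_slist *slist;
        for(slist = ci->certinfo[i]; slist; slist = slist->next)
          printf("%s\n", slist->data);
      }
    }
    curl_easy_cleanup(curl);
  }
  return 0;
}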


@ -1,176 +0,0 @@
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2011, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
/* Example source code to show how the callback function can be used to
* download data into a chunk of memory instead of storing it in a file.
* After successful download we use curl_easy_getinfo() calls to get the
* amount of downloaded bytes, the time used for the whole download, and
* the average download speed.
* On Linux you can create the download test files with:
* dd if=/dev/urandom of=file_1M.bin bs=1M count=1
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <curl/curl.h>
#define URL_BASE "http://speedtest.your.domain/"
#define URL_1M URL_BASE "file_1M.bin"
#define URL_2M URL_BASE "file_2M.bin"
#define URL_5M URL_BASE "file_5M.bin"
#define URL_10M URL_BASE "file_10M.bin"
#define URL_20M URL_BASE "file_20M.bin"
#define URL_50M URL_BASE "file_50M.bin"
#define URL_100M URL_BASE "file_100M.bin"
#define CHKSPEED_VERSION "1.0"
static size_t WriteCallback(void *ptr, size_t size, size_t nmemb, void *data)
{
/* we are not interested in the downloaded bytes itself,
so we only return the size we would have saved ... */
(void)ptr; /* unused */
(void)data; /* unused */
return (size_t)(size * nmemb);
}
int main(int argc, char *argv[])
{
CURL *curl_handle;
CURLcode res;
int prtsep = 0, prttime = 0;
const char *url = URL_1M;
char *appname = argv[0];
if (argc > 1) {
/* parse input parameters */
for (argc--, argv++; *argv; argc--, argv++) {
if (strncasecmp(*argv, "-", 1) == 0) {
if (strncasecmp(*argv, "-H", 2) == 0) {
fprintf(stderr,
"\rUsage: %s [-m=1|2|5|10|20|50|100] [-t] [-x] [url]\n",
appname);
exit(1);
} else if (strncasecmp(*argv, "-V", 2) == 0) {
fprintf(stderr, "\r%s %s - %s\n",
appname, CHKSPEED_VERSION, curl_version());
exit(1);
} else if (strncasecmp(*argv, "-X", 2) == 0) {
prtsep = 1;
} else if (strncasecmp(*argv, "-T", 2) == 0) {
prttime = 1;
} else if (strncasecmp(*argv, "-M=", 3) == 0) {
long m = strtol((*argv)+3, NULL, 10);
switch(m) {
case 1: url = URL_1M;
break;
case 2: url = URL_2M;
break;
case 5: url = URL_5M;
break;
case 10: url = URL_10M;
break;
case 20: url = URL_20M;
break;
case 50: url = URL_50M;
break;
case 100: url = URL_100M;
break;
default: fprintf(stderr, "\r%s: invalid parameter %s\n",
appname, *argv + 3);
exit(1);
}
} else {
fprintf(stderr, "\r%s: invalid or unknown option %s\n",
appname, *argv);
exit(1);
}
} else {
url = *argv;
}
}
}
/* print separator line */
if (prtsep) {
printf("-------------------------------------------------\n");
}
/* print localtime */
if (prttime) {
time_t t = time(NULL);
printf("Localtime: %s", ctime(&t));
}
/* init libcurl */
curl_global_init(CURL_GLOBAL_ALL);
/* init the curl session */
curl_handle = curl_easy_init();
/* specify URL to get */
curl_easy_setopt(curl_handle, CURLOPT_URL, url);
/* send all data to this function */
curl_easy_setopt(curl_handle, CURLOPT_WRITEFUNCTION, WriteCallback);
/* some servers don't like requests that are made without a user-agent
field, so we provide one */
curl_easy_setopt(curl_handle, CURLOPT_USERAGENT,
"libcurl-speedchecker/" CHKSPEED_VERSION);
/* get it! */
res = curl_easy_perform(curl_handle);
if(CURLE_OK == res) {
double val;
/* check for bytes downloaded */
res = curl_easy_getinfo(curl_handle, CURLINFO_SIZE_DOWNLOAD, &val);
if((CURLE_OK == res) && (val>0))
printf("Data downloaded: %0.0f bytes.\n", val);
/* check for total download time */
res = curl_easy_getinfo(curl_handle, CURLINFO_TOTAL_TIME, &val);
if((CURLE_OK == res) && (val>0))
printf("Total download time: %0.3f sec.\n", val);
/* check for average download speed */
res = curl_easy_getinfo(curl_handle, CURLINFO_SPEED_DOWNLOAD, &val);
if((CURLE_OK == res) && (val>0))
printf("Average download speed: %0.3f kbyte/sec.\n", val / 1024);
} else {
fprintf(stderr, "Error while fetching '%s' : %s\n",
url, curl_easy_strerror(res));
}
/* cleanup curl stuff */
curl_easy_cleanup(curl_handle);
/* we're done with libcurl, so clean it up */
curl_global_cleanup();
return 0;
}
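A hedged addition that is not part of the example above: since the speed check downloads the whole test file, the standard timeout and low-speed options could be set before curl_easy_perform() so a stalled server cannot hang the run.
/* hedged sketch, not in the original example: bound the whole transfer and
   abort if the link drops below 1000 bytes/sec for 10 consecutive seconds */
curl_easy_setopt(curl_handle, CURLOPT_TIMEOUT, 30L);
curl_easy_setopt(curl_handle, CURLOPT_LOW_SPEED_LIMIT, 1000L);
curl_easy_setopt(curl_handle, CURLOPT_LOW_SPEED_TIME, 10L);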

View File

@ -1,25 +1,12 @@
/***************************************************************************
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2012, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
/* This example shows usage of simple cookie interface. */
* This example shows usage of simple cookie interface.
*/
#include <stdio.h>
#include <string.h>
@ -66,7 +53,7 @@ main(void)
if (curl) {
char nline[256];
curl_easy_setopt(curl, CURLOPT_URL, "http://www.example.com/");
curl_easy_setopt(curl, CURLOPT_URL, "http://www.google.com/"); /* google.com sets "PREF" cookie */
curl_easy_setopt(curl, CURLOPT_VERBOSE, 1L);
curl_easy_setopt(curl, CURLOPT_COOKIEFILE, ""); /* just to start the cookie engine */
res = curl_easy_perform(curl);
@ -89,11 +76,11 @@ main(void)
#endif
/* Netscape format cookie */
snprintf(nline, sizeof(nline), "%s\t%s\t%s\t%s\t%lu\t%s\t%s",
".google.com", "TRUE", "/", "FALSE", (unsigned long)time(NULL) + 31337UL, "PREF", "hello google, i like you very much!");
".google.com", "TRUE", "/", "FALSE", time(NULL) + 31337, "PREF", "hello google, i like you very much!");
res = curl_easy_setopt(curl, CURLOPT_COOKIELIST, nline);
if (res != CURLE_OK) {
fprintf(stderr, "Curl curl_easy_setopt failed: %s\n", curl_easy_strerror(res));
return 1;
return 1;
}
/* HTTP-header style cookie */
@ -103,7 +90,7 @@ main(void)
res = curl_easy_setopt(curl, CURLOPT_COOKIELIST, nline);
if (res != CURLE_OK) {
fprintf(stderr, "Curl curl_easy_setopt failed: %s\n", curl_easy_strerror(res));
return 1;
return 1;
}
print_cookies(curl);
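The hunk above keeps calling a print_cookies() helper; a hedged sketch of what such a helper can do with the cookie engine, using the standard CURLINFO_COOKIELIST readout:
/* sketch only: list the cookies the handle currently knows; the returned
   curl_slist must be freed by the caller */
static void print_cookies_sketch(CURL *curl)
{
  struct curl_slist *cookies = NULL;
  if(curl_easy_getinfo(curl, CURLINFO_COOKIELIST, &cookies) == CURLE_OK) {
    struct curl_slist *each = cookies;
    while(each) {
      printf("%s\n", each->data); /* one Netscape-format line per cookie */
      each = each->next;
    }
    curl_slist_free_all(cookies);
  }
}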

View File

@ -5,6 +5,7 @@
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* $Id: curlgtk.c,v 1.7 2008-05-22 21:20:08 danf Exp $
*/
/* Copyright (c) 2000 David Odin (aka DindinX) for MandrakeSoft */
/* an attempt to use the curl library in concert with a gtk-threaded application */
@ -13,6 +14,8 @@
#include <gtk/gtk.h>
#include <curl/curl.h>
#include <curl/types.h> /* new for v7 */
#include <curl/easy.h> /* new for v7 */
GtkWidget *Bar;

View File

@ -115,7 +115,7 @@ static const char *curlx_usage[]={
*/
/*
/*
* We use this ZERO_NULL to avoid picky compiler warnings,
* when assigning a NULL pointer to a function pointer var.
*/
@ -239,7 +239,8 @@ static CURLcode sslctxfun(CURL * curl, void * sslctx, void * parm) {
SSL_CTX_set_cipher_list(ctx,"RC4-MD5");
SSL_CTX_set_mode(ctx, SSL_MODE_AUTO_RETRY);
X509_STORE_add_cert(SSL_CTX_get_cert_store(ctx), sk_X509_value(p->ca, sk_X509_num(p->ca)-1));
X509_STORE_add_cert(ctx->cert_store,sk_X509_value(p->ca,
sk_X509_num(p->ca)-1));
SSL_CTX_set_verify_depth(ctx,2);
@ -452,7 +453,7 @@ int main(int argc, char **argv) {
{
FILE *outfp;
BIO_get_fp(out,&outfp);
curl_easy_setopt(p.curl, CURLOPT_WRITEDATA, outfp);
curl_easy_setopt(p.curl, CURLOPT_FILE,outfp);
}
res = curl_easy_setopt(p.curl, CURLOPT_SSL_CTX_FUNCTION, sslctxfun) ;

View File

@ -1,24 +1,13 @@
/***************************************************************************
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2013, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
* $Id: debug.c,v 1.4 2008-05-22 21:20:09 danf Exp $
*/
#include <stdio.h>
#include <curl/curl.h>
@ -40,12 +29,11 @@ void dump(const char *text,
/* without the hex output, we can fit more on screen */
width = 0x40;
fprintf(stream, "%s, %10.10ld bytes (0x%8.8lx)\n",
text, (long)size, (long)size);
fprintf(stream, "%s, %zd bytes (0x%zx)\n", text, size, size);
for(i=0; i<size; i+= width) {
fprintf(stream, "%4.4lx: ", (long)i);
fprintf(stream, "%04zx: ", i);
if(!nohex) {
/* hex not disabled, show it */
@ -130,15 +118,8 @@ int main(void)
/* the DEBUGFUNCTION has no effect until we enable VERBOSE */
curl_easy_setopt(curl, CURLOPT_VERBOSE, 1L);
/* example.com is redirected, so we tell libcurl to follow redirection */
curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
curl_easy_setopt(curl, CURLOPT_URL, "http://example.com/");
curl_easy_setopt(curl, CURLOPT_URL, "curl.haxx.se");
res = curl_easy_perform(curl);
/* Check for errors */
if(res != CURLE_OK)
fprintf(stderr, "curl_easy_perform() failed: %s\n",
curl_easy_strerror(res));
/* always cleanup */
curl_easy_cleanup(curl);
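The hunk above relies on a DEBUGFUNCTION that feeds dump(); the callback prototype is fixed by libcurl, so a minimal, hedged sketch of one (the curl_infotype dispatch is simplified here) looks like this:
/* sketch of a CURLOPT_DEBUGFUNCTION callback; the real example routes every
   curl_infotype through the dump() shown above */
static int my_trace(CURL *handle, curl_infotype type, char *data, size_t size,
                    void *userp)
{
  (void)handle;
  (void)userp;
  if(type == CURLINFO_TEXT)
    fprintf(stderr, "== Info: %.*s", (int)size, data);
  return 0; /* a debug callback must return 0 */
}
/* wired up with:
   curl_easy_setopt(curl, CURLOPT_DEBUGFUNCTION, my_trace);
   curl_easy_setopt(curl, CURLOPT_VERBOSE, 1L); */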

View File

@ -1,442 +0,0 @@
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2012, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
/* Example application source code using the multi socket interface to
* download many files at once.
*
* This example features the same basic functionality as hiperfifo.c does,
* but this uses libev instead of libevent.
*
* Written by Jeff Pohlmeyer, converted to use libev by Markus Koetter
Requires libev and a (POSIX?) system that has mkfifo().
This is an adaptation of libcurl's "hipev.c" and libevent's "event-test.c"
sample programs.
When running, the program creates the named pipe "hiper.fifo"
Whenever there is input into the fifo, the program reads the input as a list
of URLs and creates some new easy handles to fetch each URL via the
curl_multi "hiper" API.
Thus, you can try a single URL:
% echo http://www.yahoo.com > hiper.fifo
Or a whole bunch of them:
% cat my-url-list > hiper.fifo
The fifo buffer is handled almost instantly, so you can even add more URLs
while the previous requests are still being downloaded.
Note:
For the sake of simplicity, URL length is limited to 1023 chars!
This is purely a demo app, all retrieved data is simply discarded by the write
callback.
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#include <sys/poll.h>
#include <curl/curl.h>
#include <ev.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <errno.h>
#define DPRINT(x...) printf(x)
#define MSG_OUT stdout /* Send info to stdout, change to stderr if you want */
/* Global information, common to all connections */
typedef struct _GlobalInfo
{
struct ev_loop *loop;
struct ev_io fifo_event;
struct ev_timer timer_event;
CURLM *multi;
int still_running;
FILE* input;
} GlobalInfo;
/* Information associated with a specific easy handle */
typedef struct _ConnInfo
{
CURL *easy;
char *url;
GlobalInfo *global;
char error[CURL_ERROR_SIZE];
} ConnInfo;
/* Information associated with a specific socket */
typedef struct _SockInfo
{
curl_socket_t sockfd;
CURL *easy;
int action;
long timeout;
struct ev_io ev;
int evset;
GlobalInfo *global;
} SockInfo;
static void timer_cb(EV_P_ struct ev_timer *w, int revents);
/* Update the event timer after curl_multi library calls */
static int multi_timer_cb(CURLM *multi, long timeout_ms, GlobalInfo *g)
{
DPRINT("%s %li\n", __PRETTY_FUNCTION__, timeout_ms);
ev_timer_stop(g->loop, &g->timer_event);
if (timeout_ms > 0)
{
double t = timeout_ms / 1000.0; /* floating-point divide keeps sub-second timeouts */
ev_timer_init(&g->timer_event, timer_cb, t, 0.);
ev_timer_start(g->loop, &g->timer_event);
}else
timer_cb(g->loop, &g->timer_event, 0);
return 0;
}
/* Die if we get a bad CURLMcode somewhere */
static void mcode_or_die(const char *where, CURLMcode code)
{
if ( CURLM_OK != code )
{
const char *s;
switch ( code )
{
case CURLM_CALL_MULTI_PERFORM: s="CURLM_CALL_MULTI_PERFORM"; break;
case CURLM_BAD_HANDLE: s="CURLM_BAD_HANDLE"; break;
case CURLM_BAD_EASY_HANDLE: s="CURLM_BAD_EASY_HANDLE"; break;
case CURLM_OUT_OF_MEMORY: s="CURLM_OUT_OF_MEMORY"; break;
case CURLM_INTERNAL_ERROR: s="CURLM_INTERNAL_ERROR"; break;
case CURLM_UNKNOWN_OPTION: s="CURLM_UNKNOWN_OPTION"; break;
case CURLM_LAST: s="CURLM_LAST"; break;
default: s="CURLM_unknown";
break;
case CURLM_BAD_SOCKET: s="CURLM_BAD_SOCKET";
fprintf(MSG_OUT, "ERROR: %s returns %s\n", where, s);
/* ignore this error */
return;
}
fprintf(MSG_OUT, "ERROR: %s returns %s\n", where, s);
exit(code);
}
}
/* Check for completed transfers, and remove their easy handles */
static void check_multi_info(GlobalInfo *g)
{
char *eff_url;
CURLMsg *msg;
int msgs_left;
ConnInfo *conn;
CURL *easy;
CURLcode res;
fprintf(MSG_OUT, "REMAINING: %d\n", g->still_running);
while ((msg = curl_multi_info_read(g->multi, &msgs_left))) {
if (msg->msg == CURLMSG_DONE) {
easy = msg->easy_handle;
res = msg->data.result;
curl_easy_getinfo(easy, CURLINFO_PRIVATE, &conn);
curl_easy_getinfo(easy, CURLINFO_EFFECTIVE_URL, &eff_url);
fprintf(MSG_OUT, "DONE: %s => (%d) %s\n", eff_url, res, conn->error);
curl_multi_remove_handle(g->multi, easy);
free(conn->url);
curl_easy_cleanup(easy);
free(conn);
}
}
}
/* Called by libevent when we get action on a multi socket */
static void event_cb(EV_P_ struct ev_io *w, int revents)
{
DPRINT("%s w %p revents %i\n", __PRETTY_FUNCTION__, w, revents);
GlobalInfo *g = (GlobalInfo*) w->data;
CURLMcode rc;
int action = (revents&EV_READ?CURL_POLL_IN:0)|
(revents&EV_WRITE?CURL_POLL_OUT:0);
rc = curl_multi_socket_action(g->multi, w->fd, action, &g->still_running);
mcode_or_die("event_cb: curl_multi_socket_action", rc);
check_multi_info(g);
if ( g->still_running <= 0 )
{
fprintf(MSG_OUT, "last transfer done, kill timeout\n");
ev_timer_stop(g->loop, &g->timer_event);
}
}
/* Called by libevent when our timeout expires */
static void timer_cb(EV_P_ struct ev_timer *w, int revents)
{
DPRINT("%s w %p revents %i\n", __PRETTY_FUNCTION__, w, revents);
GlobalInfo *g = (GlobalInfo *)w->data;
CURLMcode rc;
rc = curl_multi_socket_action(g->multi, CURL_SOCKET_TIMEOUT, 0, &g->still_running);
mcode_or_die("timer_cb: curl_multi_socket_action", rc);
check_multi_info(g);
}
/* Clean up the SockInfo structure */
static void remsock(SockInfo *f, GlobalInfo *g)
{
printf("%s \n", __PRETTY_FUNCTION__);
if ( f )
{
if ( f->evset )
ev_io_stop(g->loop, &f->ev);
free(f);
}
}
/* Assign information to a SockInfo structure */
static void setsock(SockInfo*f, curl_socket_t s, CURL*e, int act, GlobalInfo*g)
{
printf("%s \n", __PRETTY_FUNCTION__);
int kind = (act&CURL_POLL_IN?EV_READ:0)|(act&CURL_POLL_OUT?EV_WRITE:0);
f->sockfd = s;
f->action = act;
f->easy = e;
if ( f->evset )
ev_io_stop(g->loop, &f->ev);
ev_io_init(&f->ev, event_cb, f->sockfd, kind);
f->ev.data = g;
f->evset=1;
ev_io_start(g->loop, &f->ev);
}
/* Initialize a new SockInfo structure */
static void addsock(curl_socket_t s, CURL *easy, int action, GlobalInfo *g)
{
SockInfo *fdp = calloc(sizeof(SockInfo), 1);
fdp->global = g;
setsock(fdp, s, easy, action, g);
curl_multi_assign(g->multi, s, fdp);
}
/* CURLMOPT_SOCKETFUNCTION */
static int sock_cb(CURL *e, curl_socket_t s, int what, void *cbp, void *sockp)
{
DPRINT("%s e %p s %i what %i cbp %p sockp %p\n",
__PRETTY_FUNCTION__, e, s, what, cbp, sockp);
GlobalInfo *g = (GlobalInfo*) cbp;
SockInfo *fdp = (SockInfo*) sockp;
const char *whatstr[]={ "none", "IN", "OUT", "INOUT", "REMOVE"};
fprintf(MSG_OUT,
"socket callback: s=%d e=%p what=%s ", s, e, whatstr[what]);
if ( what == CURL_POLL_REMOVE )
{
fprintf(MSG_OUT, "\n");
remsock(fdp, g);
} else
{
if ( !fdp )
{
fprintf(MSG_OUT, "Adding data: %s\n", whatstr[what]);
addsock(s, e, what, g);
} else
{
fprintf(MSG_OUT,
"Changing action from %s to %s\n",
whatstr[fdp->action], whatstr[what]);
setsock(fdp, s, e, what, g);
}
}
return 0;
}
/* CURLOPT_WRITEFUNCTION */
static size_t write_cb(void *ptr, size_t size, size_t nmemb, void *data)
{
size_t realsize = size * nmemb;
ConnInfo *conn = (ConnInfo*) data;
(void)ptr;
(void)conn;
return realsize;
}
/* CURLOPT_PROGRESSFUNCTION */
static int prog_cb (void *p, double dltotal, double dlnow, double ult,
double uln)
{
ConnInfo *conn = (ConnInfo *)p;
(void)ult;
(void)uln;
fprintf(MSG_OUT, "Progress: %s (%g/%g)\n", conn->url, dlnow, dltotal);
return 0;
}
/* Create a new easy handle, and add it to the global curl_multi */
static void new_conn(char *url, GlobalInfo *g )
{
ConnInfo *conn;
CURLMcode rc;
conn = calloc(1, sizeof(ConnInfo));
memset(conn, 0, sizeof(ConnInfo));
conn->error[0]='\0';
conn->easy = curl_easy_init();
if ( !conn->easy )
{
fprintf(MSG_OUT, "curl_easy_init() failed, exiting!\n");
exit(2);
}
conn->global = g;
conn->url = strdup(url);
curl_easy_setopt(conn->easy, CURLOPT_URL, conn->url);
curl_easy_setopt(conn->easy, CURLOPT_WRITEFUNCTION, write_cb);
curl_easy_setopt(conn->easy, CURLOPT_WRITEDATA, conn);
curl_easy_setopt(conn->easy, CURLOPT_VERBOSE, 1L);
curl_easy_setopt(conn->easy, CURLOPT_ERRORBUFFER, conn->error);
curl_easy_setopt(conn->easy, CURLOPT_PRIVATE, conn);
curl_easy_setopt(conn->easy, CURLOPT_NOPROGRESS, 0L);
curl_easy_setopt(conn->easy, CURLOPT_PROGRESSFUNCTION, prog_cb);
curl_easy_setopt(conn->easy, CURLOPT_PROGRESSDATA, conn);
curl_easy_setopt(conn->easy, CURLOPT_LOW_SPEED_TIME, 3L);
curl_easy_setopt(conn->easy, CURLOPT_LOW_SPEED_LIMIT, 10L);
fprintf(MSG_OUT,
"Adding easy %p to multi %p (%s)\n", conn->easy, g->multi, url);
rc = curl_multi_add_handle(g->multi, conn->easy);
mcode_or_die("new_conn: curl_multi_add_handle", rc);
/* note that the add_handle() will set a time-out to trigger very soon so
that the necessary socket_action() call will be called by this app */
}
/* This gets called whenever data is received from the fifo */
static void fifo_cb(EV_P_ struct ev_io *w, int revents)
{
char s[1024];
long int rv=0;
int n=0;
GlobalInfo *g = (GlobalInfo *)w->data;
do
{
s[0]='\0';
rv=fscanf(g->input, "%1023s%n", s, &n);
s[n]='\0';
if ( n && s[0] )
{
new_conn(s,g); /* if we read a URL, go get it! */
} else break;
} while ( rv != EOF );
}
/* Create a named pipe and tell libevent to monitor it */
static int init_fifo (GlobalInfo *g)
{
struct stat st;
static const char *fifo = "hiper.fifo";
curl_socket_t sockfd;
fprintf(MSG_OUT, "Creating named pipe \"%s\"\n", fifo);
if ( lstat (fifo, &st) == 0 )
{
if ( (st.st_mode & S_IFMT) == S_IFREG )
{
errno = EEXIST;
perror("lstat");
exit (1);
}
}
unlink(fifo);
if ( mkfifo (fifo, 0600) == -1 )
{
perror("mkfifo");
exit (1);
}
sockfd = open(fifo, O_RDWR | O_NONBLOCK, 0);
if ( sockfd == -1 )
{
perror("open");
exit (1);
}
g->input = fdopen(sockfd, "r");
fprintf(MSG_OUT, "Now, pipe some URL's into > %s\n", fifo);
ev_io_init(&g->fifo_event, fifo_cb, sockfd, EV_READ);
ev_io_start(g->loop, &g->fifo_event);
return(0);
}
int main(int argc, char **argv)
{
GlobalInfo g;
CURLMcode rc;
(void)argc;
(void)argv;
memset(&g, 0, sizeof(GlobalInfo));
g.loop = ev_default_loop(0);
init_fifo(&g);
g.multi = curl_multi_init();
ev_timer_init(&g.timer_event, timer_cb, 0., 0.);
g.timer_event.data = &g;
g.fifo_event.data = &g;
curl_multi_setopt(g.multi, CURLMOPT_SOCKETFUNCTION, sock_cb);
curl_multi_setopt(g.multi, CURLMOPT_SOCKETDATA, &g);
curl_multi_setopt(g.multi, CURLMOPT_TIMERFUNCTION, multi_timer_cb);
curl_multi_setopt(g.multi, CURLMOPT_TIMERDATA, &g);
/* we don't call any curl_multi_socket*() function yet as we have no handles
added! */
ev_loop(g.loop, 0);
curl_multi_cleanup(g.multi);
return 0;
}
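For contrast with the libev-driven loop above, a hedged sketch of the simplest blocking driver for the same multi handle (assuming libcurl 7.28.0 or later, which added curl_multi_wait()):
/* sketch only: drive the CURLM handle without an external event loop */
CURLM *multi = g.multi; /* the handle set up in main() above */
int still_running = 0;
curl_multi_perform(multi, &still_running);
while(still_running) {
  int numfds = 0;
  if(curl_multi_wait(multi, NULL, 0, 1000, &numfds) != CURLM_OK)
    break;                                   /* poll-level failure */
  curl_multi_perform(multi, &still_running); /* let libcurl run the transfers */
}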

View File

@ -1,153 +0,0 @@
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2012, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
/*
* This is an example demonstrating how an application can pass in a custom
* socket to libcurl to use. This example also handles the connect itself.
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <curl/curl.h>
#ifdef WIN32
#include <windows.h>
#include <winsock2.h>
#include <ws2tcpip.h>
#define close closesocket
#else
#include <sys/types.h> /* socket types */
#include <sys/socket.h> /* socket definitions */
#include <netinet/in.h>
#include <arpa/inet.h> /* inet (3) functions */
#include <unistd.h> /* misc. UNIX functions */
#endif
#include <errno.h>
/* The IP address and port number to connect to */
#define IPADDR "127.0.0.1"
#define PORTNUM 80
#ifndef INADDR_NONE
#define INADDR_NONE 0xffffffff
#endif
static size_t write_data(void *ptr, size_t size, size_t nmemb, void *stream)
{
int written = fwrite(ptr, size, nmemb, (FILE *)stream);
return written;
}
static curl_socket_t opensocket(void *clientp,
curlsocktype purpose,
struct curl_sockaddr *address)
{
curl_socket_t sockfd;
(void)purpose;
(void)address;
sockfd = *(curl_socket_t *)clientp;
/* the actual externally set socket is passed in via the OPENSOCKETDATA
option */
return sockfd;
}
static int sockopt_callback(void *clientp, curl_socket_t curlfd,
curlsocktype purpose)
{
(void)clientp;
(void)curlfd;
(void)purpose;
/* This return code was added in libcurl 7.21.5 */
return CURL_SOCKOPT_ALREADY_CONNECTED;
}
int main(void)
{
CURL *curl;
CURLcode res;
struct sockaddr_in servaddr; /* socket address structure */
curl_socket_t sockfd;
#ifdef WIN32
WSADATA wsaData;
int initwsa;
if((initwsa = WSAStartup(MAKEWORD(2,0), &wsaData)) != 0) {
printf("WSAStartup failed: %d\n", initwsa);
return 1;
}
#endif
curl = curl_easy_init();
if(curl) {
/*
* Note that libcurl will internally think that you connect to the host
* and port that you specify in the URL option.
*/
curl_easy_setopt(curl, CURLOPT_URL, "http://99.99.99.99:9999");
/* Create the socket "manually" */
if( (sockfd = socket(AF_INET, SOCK_STREAM, 0)) == CURL_SOCKET_BAD ) {
printf("Error creating listening socket.\n");
return 3;
}
memset(&servaddr, 0, sizeof(servaddr));
servaddr.sin_family = AF_INET;
servaddr.sin_port = htons(PORTNUM);
if (INADDR_NONE == (servaddr.sin_addr.s_addr = inet_addr(IPADDR)))
return 2;
if(connect(sockfd,(struct sockaddr *) &servaddr, sizeof(servaddr)) ==
-1) {
close(sockfd);
printf("client error: connect: %s\n", strerror(errno));
return 1;
}
/* no progress meter please */
curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 1L);
/* send all data to this function */
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_data);
/* call this function to get a socket */
curl_easy_setopt(curl, CURLOPT_OPENSOCKETFUNCTION, opensocket);
curl_easy_setopt(curl, CURLOPT_OPENSOCKETDATA, &sockfd);
/* call this function to set options for the socket */
curl_easy_setopt(curl, CURLOPT_SOCKOPTFUNCTION, sockopt_callback);
curl_easy_setopt(curl, CURLOPT_VERBOSE, 1);
res = curl_easy_perform(curl);
curl_easy_cleanup(curl);
if(res) {
printf("libcurl error: %d\n", res);
return 4;
}
}
return 0;
}
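Because the socket is created outside libcurl here, closing it can be delegated back to the application as well; a hedged companion sketch (CURLOPT_CLOSESOCKETFUNCTION exists from libcurl 7.21.7 on):
/* sketch only: matching close callback for the externally created socket */
static int closesocket_cb(void *clientp, curl_socket_t item)
{
  (void)clientp;
  printf("libcurl asked us to close socket %d\n", (int)item);
  return close(item); /* 'close' is remapped to closesocket on WIN32 above */
}
/* curl_easy_setopt(curl, CURLOPT_CLOSESOCKETFUNCTION, closesocket_cb); */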

View File

@ -1,24 +1,13 @@
/***************************************************************************
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2011, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
* $Id: fileupload.c,v 1.5 2008-09-10 07:11:45 danf Exp $
*/
#include <stdio.h>
#include <curl/curl.h>
#include <sys/stat.h>
@ -64,21 +53,14 @@ int main(void)
curl_easy_setopt(curl, CURLOPT_VERBOSE, 1L);
res = curl_easy_perform(curl);
/* Check for errors */
if(res != CURLE_OK) {
fprintf(stderr, "curl_easy_perform() failed: %s\n",
curl_easy_strerror(res));
}
else {
/* now extract transfer info */
curl_easy_getinfo(curl, CURLINFO_SPEED_UPLOAD, &speed_upload);
curl_easy_getinfo(curl, CURLINFO_TOTAL_TIME, &total_time);
/* now extract transfer info */
curl_easy_getinfo(curl, CURLINFO_SPEED_UPLOAD, &speed_upload);
curl_easy_getinfo(curl, CURLINFO_TOTAL_TIME, &total_time);
fprintf(stderr, "Speed: %.3f bytes/sec during %.3f seconds\n",
speed_upload, total_time);
fprintf(stderr, "Speed: %.3f bytes/sec during %.3f seconds\n",
speed_upload, total_time);
}
/* always cleanup */
curl_easy_cleanup(curl);
}

View File

@ -13,7 +13,7 @@
* See the main() function at the bottom that shows an app that retrieves from a
* specified url using fgets() and fread() and saves as two output files.
*
* Copyright (c) 2003 Simtec Electronics
* Coyright (c)2003 Simtec Electronics
*
* Re-implemented by Vincent Sanders <vince@kyllikki.org> with extensive
* reference to original curl example code
@ -53,24 +53,20 @@
#include <curl/curl.h>
enum fcurl_type_e {
CFTYPE_NONE=0,
CFTYPE_FILE=1,
CFTYPE_CURL=2
};
enum fcurl_type_e { CFTYPE_NONE=0, CFTYPE_FILE=1, CFTYPE_CURL=2 };
struct fcurl_data
{
enum fcurl_type_e type; /* type of handle */
union {
CURL *curl;
FILE *file;
} handle; /* handle */
enum fcurl_type_e type; /* type of handle */
union {
CURL *curl;
FILE *file;
} handle; /* handle */
char *buffer; /* buffer to store cached data*/
size_t buffer_len; /* currently allocated buffers length */
size_t buffer_pos; /* end of data in buffer*/
int still_running; /* Is background url fetch still in progress */
char *buffer; /* buffer to store cached data*/
int buffer_len; /* currently allocated buffers length */
int buffer_pos; /* end of data in buffer*/
int still_running; /* Is background url fetch still in progress */
};
typedef struct fcurl_data URL_FILE;
@ -80,448 +76,491 @@ URL_FILE *url_fopen(const char *url,const char *operation);
int url_fclose(URL_FILE *file);
int url_feof(URL_FILE *file);
size_t url_fread(void *ptr, size_t size, size_t nmemb, URL_FILE *file);
char * url_fgets(char *ptr, size_t size, URL_FILE *file);
char * url_fgets(char *ptr, int size, URL_FILE *file);
void url_rewind(URL_FILE *file);
/* we use a global one for convenience */
CURLM *multi_handle;
/* curl calls this routine to get more data */
static size_t write_callback(char *buffer,
size_t size,
size_t nitems,
void *userp)
static size_t
write_callback(char *buffer,
size_t size,
size_t nitems,
void *userp)
{
char *newbuff;
size_t rembuff;
char *newbuff;
int rembuff;
URL_FILE *url = (URL_FILE *)userp;
size *= nitems;
URL_FILE *url = (URL_FILE *)userp;
size *= nitems;
rembuff=url->buffer_len - url->buffer_pos; /* remaining space in buffer */
rembuff=url->buffer_len - url->buffer_pos; /* remaining space in buffer */
if(size > rembuff) {
/* not enough space in buffer */
newbuff=realloc(url->buffer,url->buffer_len + (size - rembuff));
if(newbuff==NULL) {
fprintf(stderr,"callback buffer grow failed\n");
size=rembuff;
if(size > rembuff)
{
/* not enough space in buffer */
newbuff=realloc(url->buffer,url->buffer_len + (size - rembuff));
if(newbuff==NULL)
{
fprintf(stderr,"callback buffer grow failed\n");
size=rembuff;
}
else
{
/* realloc succeeded, increase buffer size */
url->buffer_len+=size - rembuff;
url->buffer=newbuff;
/*printf("Callback buffer grown to %d bytes\n",url->buffer_len);*/
}
}
else {
/* realloc succeeded, increase buffer size */
url->buffer_len+=size - rembuff;
url->buffer=newbuff;
}
}
memcpy(&url->buffer[url->buffer_pos], buffer, size);
url->buffer_pos += size;
memcpy(&url->buffer[url->buffer_pos], buffer, size);
url->buffer_pos += size;
return size;
/*fprintf(stderr, "callback %d size bytes\n", size);*/
return size;
}
/* use to attempt to fill the read buffer up to requested number of bytes */
static int fill_buffer(URL_FILE *file, size_t want)
static int
fill_buffer(URL_FILE *file,int want,int waittime)
{
fd_set fdread;
fd_set fdwrite;
fd_set fdexcep;
struct timeval timeout;
int rc;
fd_set fdread;
fd_set fdwrite;
fd_set fdexcep;
int maxfd;
struct timeval timeout;
int rc;
/* only attempt to fill buffer if transactions still running and buffer
* doesnt exceed required size already
*/
if((!file->still_running) || (file->buffer_pos > want))
return 0;
/* only attempt to fill buffer if transactions still running and buffer
* doesnt exceed required size already
*/
if((!file->still_running) || (file->buffer_pos > want))
return 0;
/* attempt to fill buffer */
do {
int maxfd = -1;
long curl_timeo = -1;
/* attempt to fill buffer */
do
{
FD_ZERO(&fdread);
FD_ZERO(&fdwrite);
FD_ZERO(&fdexcep);
FD_ZERO(&fdread);
FD_ZERO(&fdwrite);
FD_ZERO(&fdexcep);
/* set a suitable timeout to fail on */
timeout.tv_sec = 60; /* 1 minute */
timeout.tv_usec = 0;
/* set a suitable timeout to fail on */
timeout.tv_sec = 60; /* 1 minute */
timeout.tv_usec = 0;
/* get file descriptors from the transfers */
curl_multi_fdset(multi_handle, &fdread, &fdwrite, &fdexcep, &maxfd);
curl_multi_timeout(multi_handle, &curl_timeo);
if(curl_timeo >= 0) {
timeout.tv_sec = curl_timeo / 1000;
if(timeout.tv_sec > 1)
timeout.tv_sec = 1;
else
timeout.tv_usec = (curl_timeo % 1000) * 1000;
}
/* In a real-world program you OF COURSE check the return code of the
function calls, *and* you make sure that maxfd is bigger than -1
so that the call to select() below makes sense! */
/* get file descriptors from the transfers */
curl_multi_fdset(multi_handle, &fdread, &fdwrite, &fdexcep, &maxfd);
rc = select(maxfd+1, &fdread, &fdwrite, &fdexcep, &timeout);
/* In a real-world program you OF COURSE check the return code of the
function calls. On success, the value of maxfd is guaranteed to be
greater or equal than -1. We call select(maxfd + 1, ...), specially
in case of (maxfd == -1), we call select(0, ...), which is basically
equal to sleep. */
switch(rc) {
case -1:
/* select error */
break;
rc = select(maxfd+1, &fdread, &fdwrite, &fdexcep, &timeout);
case 0:
break;
switch(rc) {
case -1:
/* select error */
break;
default:
/* timeout or readable/writable sockets */
/* note we *could* be more efficient and not wait for
* CURLM_CALL_MULTI_PERFORM to clear here and check it on re-entry
* but that gets messy */
while(curl_multi_perform(multi_handle, &file->still_running) ==
CURLM_CALL_MULTI_PERFORM);
case 0:
default:
/* timeout or readable/writable sockets */
curl_multi_perform(multi_handle, &file->still_running);
break;
}
} while(file->still_running && (file->buffer_pos < want));
return 1;
break;
}
} while(file->still_running && (file->buffer_pos < want));
return 1;
}
/* use to remove want bytes from the front of a files buffer */
static int use_buffer(URL_FILE *file,int want)
static int
use_buffer(URL_FILE *file,int want)
{
/* sort out buffer */
if((file->buffer_pos - want) <=0) {
/* ditch buffer - write will recreate */
if(file->buffer)
free(file->buffer);
/* sort out buffer */
if((file->buffer_pos - want) <=0)
{
/* ditch buffer - write will recreate */
if(file->buffer)
free(file->buffer);
file->buffer=NULL;
file->buffer_pos=0;
file->buffer_len=0;
}
else {
/* move rest down make it available for later */
memmove(file->buffer,
&file->buffer[want],
(file->buffer_pos - want));
file->buffer_pos -= want;
}
return 0;
}
URL_FILE *url_fopen(const char *url,const char *operation)
{
/* this code could check for URLs or types in the 'url' and
basicly use the real fopen() for standard files */
URL_FILE *file;
(void)operation;
file = malloc(sizeof(URL_FILE));
if(!file)
return NULL;
memset(file, 0, sizeof(URL_FILE));
if((file->handle.file=fopen(url,operation)))
file->type = CFTYPE_FILE; /* marked as URL */
else {
file->type = CFTYPE_CURL; /* marked as URL */
file->handle.curl = curl_easy_init();
curl_easy_setopt(file->handle.curl, CURLOPT_URL, url);
curl_easy_setopt(file->handle.curl, CURLOPT_WRITEDATA, file);
curl_easy_setopt(file->handle.curl, CURLOPT_VERBOSE, 0L);
curl_easy_setopt(file->handle.curl, CURLOPT_WRITEFUNCTION, write_callback);
if(!multi_handle)
multi_handle = curl_multi_init();
curl_multi_add_handle(multi_handle, file->handle.curl);
/* lets start the fetch */
curl_multi_perform(multi_handle, &file->still_running);
if((file->buffer_pos == 0) && (!file->still_running)) {
/* if still_running is 0 now, we should return NULL */
/* make sure the easy handle is not in the multi handle anymore */
curl_multi_remove_handle(multi_handle, file->handle.curl);
/* cleanup */
curl_easy_cleanup(file->handle.curl);
free(file);
file = NULL;
file->buffer=NULL;
file->buffer_pos=0;
file->buffer_len=0;
}
}
return file;
else
{
/* move rest down make it available for later */
memmove(file->buffer,
&file->buffer[want],
(file->buffer_pos - want));
file->buffer_pos -= want;
}
return 0;
}
int url_fclose(URL_FILE *file)
URL_FILE *
url_fopen(const char *url,const char *operation)
{
int ret=0;/* default is good return */
/* this code could check for URLs or types in the 'url' and
basically use the real fopen() for standard files */
switch(file->type) {
case CFTYPE_FILE:
ret=fclose(file->handle.file); /* passthrough */
break;
URL_FILE *file;
(void)operation;
case CFTYPE_CURL:
/* make sure the easy handle is not in the multi handle anymore */
curl_multi_remove_handle(multi_handle, file->handle.curl);
file = malloc(sizeof(URL_FILE));
if(!file)
return NULL;
/* cleanup */
curl_easy_cleanup(file->handle.curl);
break;
memset(file, 0, sizeof(URL_FILE));
default: /* unknown or supported type - oh dear */
ret=EOF;
errno=EBADF;
break;
}
if((file->handle.file=fopen(url,operation)))
{
file->type = CFTYPE_FILE; /* marked as URL */
}
else
{
file->type = CFTYPE_CURL; /* marked as URL */
file->handle.curl = curl_easy_init();
if(file->buffer)
free(file->buffer);/* free any allocated buffer space */
curl_easy_setopt(file->handle.curl, CURLOPT_URL, url);
curl_easy_setopt(file->handle.curl, CURLOPT_WRITEDATA, file);
curl_easy_setopt(file->handle.curl, CURLOPT_VERBOSE, 0L);
curl_easy_setopt(file->handle.curl, CURLOPT_WRITEFUNCTION, write_callback);
free(file);
if(!multi_handle)
multi_handle = curl_multi_init();
return ret;
curl_multi_add_handle(multi_handle, file->handle.curl);
/* lets start the fetch */
while(curl_multi_perform(multi_handle, &file->still_running) ==
CURLM_CALL_MULTI_PERFORM );
if((file->buffer_pos == 0) && (!file->still_running))
{
/* if still_running is 0 now, we should return NULL */
/* make sure the easy handle is not in the multi handle anymore */
curl_multi_remove_handle(multi_handle, file->handle.curl);
/* cleanup */
curl_easy_cleanup(file->handle.curl);
free(file);
file = NULL;
}
}
return file;
}
int url_feof(URL_FILE *file)
int
url_fclose(URL_FILE *file)
{
int ret=0;
int ret=0;/* default is good return */
switch(file->type) {
case CFTYPE_FILE:
ret=feof(file->handle.file);
break;
case CFTYPE_CURL:
if((file->buffer_pos == 0) && (!file->still_running))
ret = 1;
break;
default: /* unknown or supported type - oh dear */
ret=-1;
errno=EBADF;
break;
}
return ret;
}
size_t url_fread(void *ptr, size_t size, size_t nmemb, URL_FILE *file)
{
size_t want;
switch(file->type) {
case CFTYPE_FILE:
want=fread(ptr,size,nmemb,file->handle.file);
break;
case CFTYPE_CURL:
want = nmemb * size;
fill_buffer(file,want);
/* check if theres data in the buffer - if not fill_buffer()
* either errored or EOF */
if(!file->buffer_pos)
return 0;
/* ensure only available data is considered */
if(file->buffer_pos < want)
want = file->buffer_pos;
/* xfer data to caller */
memcpy(ptr, file->buffer, want);
use_buffer(file,want);
want = want / size; /* number of items */
break;
default: /* unknown or supported type - oh dear */
want=0;
errno=EBADF;
break;
}
return want;
}
char *url_fgets(char *ptr, size_t size, URL_FILE *file)
{
size_t want = size - 1;/* always need to leave room for zero termination */
size_t loop;
switch(file->type) {
case CFTYPE_FILE:
ptr = fgets(ptr,size,file->handle.file);
break;
case CFTYPE_CURL:
fill_buffer(file,want);
/* check if theres data in the buffer - if not fill either errored or
* EOF */
if(!file->buffer_pos)
return NULL;
/* ensure only available data is considered */
if(file->buffer_pos < want)
want = file->buffer_pos;
/*buffer contains data */
/* look for newline or eof */
for(loop=0;loop < want;loop++) {
if(file->buffer[loop] == '\n') {
want=loop+1;/* include newline */
switch(file->type)
{
case CFTYPE_FILE:
ret=fclose(file->handle.file); /* passthrough */
break;
}
case CFTYPE_CURL:
/* make sure the easy handle is not in the multi handle anymore */
curl_multi_remove_handle(multi_handle, file->handle.curl);
/* cleanup */
curl_easy_cleanup(file->handle.curl);
break;
default: /* unknown or supported type - oh dear */
ret=EOF;
errno=EBADF;
break;
}
/* xfer data to caller */
memcpy(ptr, file->buffer, want);
ptr[want]=0;/* always null terminate */
use_buffer(file,want);
break;
default: /* unknown or supported type - oh dear */
ptr=NULL;
errno=EBADF;
break;
}
return ptr;/*success */
}
void url_rewind(URL_FILE *file)
{
switch(file->type) {
case CFTYPE_FILE:
rewind(file->handle.file); /* passthrough */
break;
case CFTYPE_CURL:
/* halt transaction */
curl_multi_remove_handle(multi_handle, file->handle.curl);
/* restart */
curl_multi_add_handle(multi_handle, file->handle.curl);
/* ditch buffer - write will recreate - resets stream pos*/
if(file->buffer)
free(file->buffer);
free(file->buffer);/* free any allocated buffer space */
file->buffer=NULL;
file->buffer_pos=0;
file->buffer_len=0;
free(file);
break;
default: /* unknown or supported type - oh dear */
break;
}
return ret;
}
int
url_feof(URL_FILE *file)
{
int ret=0;
switch(file->type)
{
case CFTYPE_FILE:
ret=feof(file->handle.file);
break;
case CFTYPE_CURL:
if((file->buffer_pos == 0) && (!file->still_running))
ret = 1;
break;
default: /* unknown or supported type - oh dear */
ret=-1;
errno=EBADF;
break;
}
return ret;
}
size_t
url_fread(void *ptr, size_t size, size_t nmemb, URL_FILE *file)
{
size_t want;
switch(file->type)
{
case CFTYPE_FILE:
want=fread(ptr,size,nmemb,file->handle.file);
break;
case CFTYPE_CURL:
want = nmemb * size;
fill_buffer(file,want,1);
/* check if theres data in the buffer - if not fill_buffer()
* either errored or EOF */
if(!file->buffer_pos)
return 0;
/* ensure only available data is considered */
if(file->buffer_pos < want)
want = file->buffer_pos;
/* xfer data to caller */
memcpy(ptr, file->buffer, want);
use_buffer(file,want);
want = want / size; /* number of items - nb correct op - checked
* with glibc code*/
/*printf("(fread) return %d bytes %d left\n", want,file->buffer_pos);*/
break;
default: /* unknown or supported type - oh dear */
want=0;
errno=EBADF;
break;
}
return want;
}
char *
url_fgets(char *ptr, int size, URL_FILE *file)
{
int want = size - 1;/* always need to leave room for zero termination */
int loop;
switch(file->type)
{
case CFTYPE_FILE:
ptr = fgets(ptr,size,file->handle.file);
break;
case CFTYPE_CURL:
fill_buffer(file,want,1);
/* check if theres data in the buffer - if not fill either errored or
* EOF */
if(!file->buffer_pos)
return NULL;
/* ensure only available data is considered */
if(file->buffer_pos < want)
want = file->buffer_pos;
/*buffer contains data */
/* look for newline or eof */
for(loop=0;loop < want;loop++)
{
if(file->buffer[loop] == '\n')
{
want=loop+1;/* include newline */
break;
}
}
/* xfer data to caller */
memcpy(ptr, file->buffer, want);
ptr[want]=0;/* always null terminate */
use_buffer(file,want);
/*printf("(fgets) return %d bytes %d left\n", want,file->buffer_pos);*/
break;
default: /* unknown or supported type - oh dear */
ptr=NULL;
errno=EBADF;
break;
}
return ptr;/*success */
}
void
url_rewind(URL_FILE *file)
{
switch(file->type)
{
case CFTYPE_FILE:
rewind(file->handle.file); /* passthrough */
break;
case CFTYPE_CURL:
/* halt transaction */
curl_multi_remove_handle(multi_handle, file->handle.curl);
/* restart */
curl_multi_add_handle(multi_handle, file->handle.curl);
/* ditch buffer - write will recreate - resets stream pos*/
if(file->buffer)
free(file->buffer);
file->buffer=NULL;
file->buffer_pos=0;
file->buffer_len=0;
break;
default: /* unknown or supported type - oh dear */
break;
}
}
/* Small main program to retrieve from a URL using fgets and fread, saving the
* output to two test files (note the fgets method will corrupt binary files if
* they contain 0 chars) */
int main(int argc, char *argv[])
int
main(int argc, char *argv[])
{
URL_FILE *handle;
FILE *outf;
URL_FILE *handle;
FILE *outf;
int nread;
char buffer[256];
const char *url;
int nread;
char buffer[256];
const char *url;
if(argc < 2)
url="http://192.168.7.3/testfile";/* default to testurl */
else
url=argv[1];/* use passed url */
if(argc < 2)
{
url="http://192.168.7.3/testfile";/* default to testurl */
}
else
{
url=argv[1];/* use passed url */
}
/* copy from url line by line with fgets */
outf=fopen("fgets.test","w+");
if(!outf) {
perror("couldn't open fgets output file\n");
return 1;
}
/* copy from url line by line with fgets */
outf=fopen("fgets.test","w+");
if(!outf)
{
perror("couldn't open fgets output file\n");
return 1;
}
handle = url_fopen(url, "r");
if(!handle)
{
printf("couldn't url_fopen() %s\n", url);
fclose(outf);
return 2;
}
while(!url_feof(handle))
{
url_fgets(buffer,sizeof(buffer),handle);
fwrite(buffer,1,strlen(buffer),outf);
}
url_fclose(handle);
handle = url_fopen(url, "r");
if(!handle) {
printf("couldn't url_fopen() %s\n", url);
fclose(outf);
return 2;
}
while(!url_feof(handle)) {
url_fgets(buffer,sizeof(buffer),handle);
fwrite(buffer,1,strlen(buffer),outf);
}
url_fclose(handle);
fclose(outf);
/* Copy from url with fread */
outf=fopen("fread.test","w+");
if(!outf) {
perror("couldn't open fread output file\n");
return 1;
}
/* Copy from url with fread */
outf=fopen("fread.test","w+");
if(!outf)
{
perror("couldn't open fread output file\n");
return 1;
}
handle = url_fopen("testfile", "r");
if(!handle) {
printf("couldn't url_fopen() testfile\n");
fclose(outf);
return 2;
}
do {
nread = url_fread(buffer, 1,sizeof(buffer), handle);
fwrite(buffer,1,nread,outf);
} while(nread);
url_fclose(handle);
handle = url_fopen("testfile", "r");
if(!handle) {
printf("couldn't url_fopen() testfile\n");
fclose(outf);
return 2;
}
do {
nread = url_fread(buffer, 1,sizeof(buffer), handle);
fwrite(buffer,1,nread,outf);
} while(nread);
url_fclose(handle);
fclose(outf);
/* Test rewind */
outf=fopen("rewind.test","w+");
if(!outf) {
perror("couldn't open fread output file\n");
return 1;
}
/* Test rewind */
outf=fopen("rewind.test","w+");
if(!outf)
{
perror("couldn't open fread output file\n");
return 1;
}
handle = url_fopen("testfile", "r");
if(!handle) {
printf("couldn't url_fopen() testfile\n");
fclose(outf);
return 2;
}
nread = url_fread(buffer, 1,sizeof(buffer), handle);
fwrite(buffer,1,nread,outf);
url_rewind(handle);
buffer[0]='\n';
fwrite(buffer,1,1,outf);
nread = url_fread(buffer, 1,sizeof(buffer), handle);
fwrite(buffer,1,nread,outf);
url_fclose(handle);
handle = url_fopen("testfile", "r");
if(!handle) {
printf("couldn't url_fopen() testfile\n");
fclose(outf);
return 2;
}
nread = url_fread(buffer, 1,sizeof(buffer), handle);
fwrite(buffer,1,nread,outf);
url_rewind(handle);
buffer[0]='\n';
fwrite(buffer,1,1,outf);
nread = url_fread(buffer, 1,sizeof(buffer), handle);
fwrite(buffer,1,nread,outf);
url_fclose(handle);
fclose(outf);
return 0;/* all done */
return 0;/* all done */
}
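Taken together, the wrapper behaves like stdio; a hedged usage sketch against a placeholder URL:
/* usage sketch of the URL_FILE wrapper (the URL is a placeholder) */
URL_FILE *f = url_fopen("http://example.com/robots.txt", "r");
if(f) {
  char line[256];
  while(!url_feof(f) && url_fgets(line, sizeof(line), f))
    fputs(line, stdout);
  url_fclose(f);
}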

View File

@ -1,148 +0,0 @@
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2012, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
#include <curl/curl.h>
#include <stdio.h>
struct callback_data {
FILE *output;
};
static long file_is_comming(struct curl_fileinfo *finfo,
struct callback_data *data,
int remains);
static long file_is_downloaded(struct callback_data *data);
static size_t write_it(char *buff, size_t size, size_t nmemb,
void *cb_data);
int main(int argc, char **argv)
{
int rc = CURLE_OK;
/* curl easy handle */
CURL *handle;
/* help data */
struct callback_data data = { 0 };
/* global initialization */
rc = curl_global_init(CURL_GLOBAL_ALL);
if(rc)
return rc;
/* initialization of easy handle */
handle = curl_easy_init();
if(!handle) {
curl_global_cleanup();
return CURLE_OUT_OF_MEMORY;
}
/* turn on wildcard matching */
curl_easy_setopt(handle, CURLOPT_WILDCARDMATCH, 1L);
/* callback is called before download of concrete file started */
curl_easy_setopt(handle, CURLOPT_CHUNK_BGN_FUNCTION, file_is_comming);
/* callback is called after data from the file have been transferred */
curl_easy_setopt(handle, CURLOPT_CHUNK_END_FUNCTION, file_is_downloaded);
/* this callback will write contents into files */
curl_easy_setopt(handle, CURLOPT_WRITEFUNCTION, write_it);
/* put transfer data into callbacks */
curl_easy_setopt(handle, CURLOPT_CHUNK_DATA, &data);
curl_easy_setopt(handle, CURLOPT_WRITEDATA, &data);
/* curl_easy_setopt(handle, CURLOPT_VERBOSE, 1L); */
/* set an URL containing wildcard pattern (only in the last part) */
if(argc == 2)
curl_easy_setopt(handle, CURLOPT_URL, argv[1]);
else
curl_easy_setopt(handle, CURLOPT_URL, "ftp://example.com/test/*");
/* and start transfer! */
rc = curl_easy_perform(handle);
curl_easy_cleanup(handle);
curl_global_cleanup();
return rc;
}
static long file_is_comming(struct curl_fileinfo *finfo,
struct callback_data *data,
int remains)
{
printf("%3d %40s %10luB ", remains, finfo->filename,
(unsigned long)finfo->size);
switch(finfo->filetype) {
case CURLFILETYPE_DIRECTORY:
printf(" DIR\n");
break;
case CURLFILETYPE_FILE:
printf("FILE ");
break;
default:
printf("OTHER\n");
break;
}
if(finfo->filetype == CURLFILETYPE_FILE) {
/* do not transfer files >= 50B */
if(finfo->size > 50) {
printf("SKIPPED\n");
return CURL_CHUNK_BGN_FUNC_SKIP;
}
data->output = fopen(finfo->filename, "w");
if(!data->output) {
return CURL_CHUNK_BGN_FUNC_FAIL;
}
}
return CURL_CHUNK_BGN_FUNC_OK;
}
static long file_is_downloaded(struct callback_data *data)
{
if(data->output) {
printf("DOWNLOADED\n");
fclose(data->output);
data->output = 0x0;
}
return CURL_CHUNK_END_FUNC_OK;
}
static size_t write_it(char *buff, size_t size, size_t nmemb,
void *cb_data)
{
struct callback_data *data = cb_data;
size_t written = 0;
if(data->output)
written = fwrite(buff, size, nmemb, data->output);
else
/* listing output */
written = fwrite(buff, size, nmemb, stdout);
return written;
}
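CURLOPT_WILDCARDMATCH applies the pattern only to the last path segment; a couple of hedged URL examples (host and paths are placeholders):
/* sketch only: wildcard patterns belong in the last part of the FTP path */
curl_easy_setopt(handle, CURLOPT_URL, "ftp://example.com/test/*.txt");
curl_easy_setopt(handle, CURLOPT_URL, "ftp://example.com/logs/2013-0[1-3]-*.log");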

View File

@ -1,27 +1,18 @@
/***************************************************************************
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2011, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
* $Id: ftpget.c,v 1.8 2008-05-22 21:20:09 danf Exp $
*/
#include <stdio.h>
#include <curl/curl.h>
#include <curl/types.h>
#include <curl/easy.h>
/*
* This is an example showing how to get a single file from an FTP server.
@ -62,10 +53,12 @@ int main(void)
curl = curl_easy_init();
if(curl) {
/*
* You better replace the URL with one that works!
* Get curl 7.9.2 from sunet.se's FTP site. curl 7.9.2 is most likely not
* present there by the time you read this, so you'd better replace the
* URL with one that works!
*/
curl_easy_setopt(curl, CURLOPT_URL,
"ftp://ftp.example.com/pub/www/utilities/curl/curl-7.9.2.tar.gz");
"ftp://ftp.sunet.se/pub/www/utilities/curl/curl-7.9.2.tar.gz");
/* Define our callback to get called when there's data to be written */
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, my_fwrite);
/* Set a pointer to our struct to pass to the callback */

View File

@ -1,89 +0,0 @@
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2012, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
#include <stdio.h>
#include <string.h>
#include <curl/curl.h>
/*
* This is an example showing how to check a single file's size and mtime
* from an FTP server.
*/
static size_t throw_away(void *ptr, size_t size, size_t nmemb, void *data)
{
(void)ptr;
(void)data;
/* we are not interested in the headers themselves,
so we only return the size we would have saved ... */
return (size_t)(size * nmemb);
}
int main(void)
{
char ftpurl[] = "ftp://ftp.example.com/gnu/binutils/binutils-2.19.1.tar.bz2";
CURL *curl;
CURLcode res;
long filetime = -1;
double filesize = 0.0;
const char *filename = strrchr(ftpurl, '/') + 1;
curl_global_init(CURL_GLOBAL_DEFAULT);
curl = curl_easy_init();
if(curl) {
curl_easy_setopt(curl, CURLOPT_URL, ftpurl);
/* No download of the file itself, we only want its metadata */
curl_easy_setopt(curl, CURLOPT_NOBODY, 1L);
/* Ask for filetime */
curl_easy_setopt(curl, CURLOPT_FILETIME, 1L);
/* No header output: TODO 14.1 http-style HEAD output for ftp */
curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, throw_away);
curl_easy_setopt(curl, CURLOPT_HEADER, 0L);
/* Switch on full protocol/debug output */
/* curl_easy_setopt(curl, CURLOPT_VERBOSE, 1L); */
res = curl_easy_perform(curl);
if(CURLE_OK == res) {
/* http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html */
res = curl_easy_getinfo(curl, CURLINFO_FILETIME, &filetime);
if((CURLE_OK == res) && (filetime >= 0)) {
time_t file_time = (time_t)filetime;
printf("filetime %s: %s", filename, ctime(&file_time));
}
res = curl_easy_getinfo(curl, CURLINFO_CONTENT_LENGTH_DOWNLOAD, &filesize);
if((CURLE_OK == res) && (filesize>0.0))
printf("filesize %s: %0.0f bytes\n", filename, filesize);
} else {
/* we failed */
fprintf(stderr, "curl told us %d\n", res);
}
/* always cleanup */
curl_easy_cleanup(curl);
}
curl_global_cleanup();
return 0;
}
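Once the remote mtime is known, the same handle can be reused to download only when the server copy is newer; a hedged follow-up sketch using the standard time-condition options:
/* sketch only: conditional download after the metadata check above */
curl_easy_setopt(curl, CURLOPT_NOBODY, 0L);          /* fetch the body this time */
curl_easy_setopt(curl, CURLOPT_TIMECONDITION, (long)CURL_TIMECOND_IFMODSINCE);
curl_easy_setopt(curl, CURLOPT_TIMEVALUE, filetime); /* from CURLINFO_FILETIME */
res = curl_easy_perform(curl);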

View File

@ -1,27 +1,18 @@
/***************************************************************************
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2011, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
* $Id: ftpgetresp.c,v 1.4 2007-07-16 21:22:12 danf Exp $
*/
#include <stdio.h>
#include <curl/curl.h>
#include <curl/types.h>
#include <curl/easy.h>
/*
* Similar to ftpget.c but this also stores the received response-lines
@ -37,7 +28,7 @@ write_response(void *ptr, size_t size, size_t nmemb, void *data)
return fwrite(ptr, size, nmemb, writehere);
}
int main(void)
int main(int argc, char **argv)
{
CURL *curl;
CURLcode res;
@ -53,17 +44,13 @@ int main(void)
curl = curl_easy_init();
if(curl) {
/* Get a file listing from sunet */
curl_easy_setopt(curl, CURLOPT_URL, "ftp://ftp.example.com/");
curl_easy_setopt(curl, CURLOPT_URL, "ftp://ftp.sunet.se/");
curl_easy_setopt(curl, CURLOPT_WRITEDATA, ftpfile);
/* If you intend to use this on windows with a libcurl DLL, you must use
CURLOPT_WRITEFUNCTION as well */
curl_easy_setopt(curl, CURLOPT_HEADERFUNCTION, write_response);
curl_easy_setopt(curl, CURLOPT_WRITEHEADER, respfile);
res = curl_easy_perform(curl);
/* Check for errors */
if(res != CURLE_OK)
fprintf(stderr, "curl_easy_perform() failed: %s\n",
curl_easy_strerror(res));
/* always cleanup */
curl_easy_cleanup(curl);

View File

@ -1,101 +0,0 @@
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2012, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
#include <stdio.h>
#include <curl/curl.h>
/*
* This is an example showing how to get a single file from an FTPS server.
* It delays the actual destination file creation until the first write
* callback so that it won't create an empty file in case the remote file
* doesn't exist or something else fails.
*/
struct FtpFile {
const char *filename;
FILE *stream;
};
static size_t my_fwrite(void *buffer, size_t size, size_t nmemb,
void *stream)
{
struct FtpFile *out=(struct FtpFile *)stream;
if(out && !out->stream) {
/* open file for writing */
out->stream=fopen(out->filename, "wb");
if(!out->stream)
return -1; /* failure, can't open file to write */
}
return fwrite(buffer, size, nmemb, out->stream);
}
int main(void)
{
CURL *curl;
CURLcode res;
struct FtpFile ftpfile={
"yourfile.bin", /* name to store the file as if succesful */
NULL
};
curl_global_init(CURL_GLOBAL_DEFAULT);
curl = curl_easy_init();
if(curl) {
/*
* You better replace the URL with one that works! Note that we use an
* FTP:// URL with standard explicit FTPS. You can also do FTPS:// URLs if
* you want to do the rarer kind of transfers: implicit.
*/
curl_easy_setopt(curl, CURLOPT_URL,
"ftp://user@server/home/user/file.txt");
/* Define our callback to get called when there's data to be written */
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, my_fwrite);
/* Set a pointer to our struct to pass to the callback */
curl_easy_setopt(curl, CURLOPT_WRITEDATA, &ftpfile);
/* We activate SSL and we require it for both control and data */
curl_easy_setopt(curl, CURLOPT_USE_SSL, CURLUSESSL_ALL);
/* Switch on full protocol/debug output */
curl_easy_setopt(curl, CURLOPT_VERBOSE, 1L);
res = curl_easy_perform(curl);
/* always cleanup */
curl_easy_cleanup(curl);
if(CURLE_OK != res) {
/* we failed */
fprintf(stderr, "curl told us %d\n", res);
}
}
if(ftpfile.stream)
fclose(ftpfile.stream); /* close the local file */
curl_global_cleanup();
return 0;
}
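The example requires SSL for control and data but leaves certificate checking at libcurl's defaults; a hedged hardening sketch (the CA bundle path is a placeholder):
/* sketch only: verify the server certificate on the FTPS connection */
curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 1L);
curl_easy_setopt(curl, CURLOPT_SSL_VERIFYHOST, 2L);
curl_easy_setopt(curl, CURLOPT_CAINFO, "/path/to/ca-bundle.crt"); /* placeholder */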

View File

@ -1,24 +1,13 @@
/***************************************************************************
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2012, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
* $Id: ftpupload.c,v 1.15 2008-05-22 21:20:09 danf Exp $
*/
#include <stdio.h>
#include <string.h>
@ -42,7 +31,7 @@
#define LOCAL_FILE "/tmp/uploadthis.txt"
#define UPLOAD_FILE_AS "while-uploading.txt"
#define REMOTE_URL "ftp://example.com/" UPLOAD_FILE_AS
#define REMOTE_URL "ftp://localhost/" UPLOAD_FILE_AS
#define RENAME_FILE_TO "renamed-and-fine.txt"
/* NOTE: if you want this example to work on Windows with libcurl as a
@ -51,26 +40,21 @@
variable's memory when passed in to it from an app like this. */
static size_t read_callback(void *ptr, size_t size, size_t nmemb, void *stream)
{
curl_off_t nread;
/* in real-world cases, this would probably get this data differently
as this fread() stuff is exactly what the library already would do
by default internally */
size_t retcode = fread(ptr, size, nmemb, stream);
nread = (curl_off_t)retcode;
fprintf(stderr, "*** We read %" CURL_FORMAT_CURL_OFF_T
" bytes from file\n", nread);
fprintf(stderr, "*** We read %d bytes from file\n", retcode);
return retcode;
}
int main(void)
int main(int argc, char **argv)
{
CURL *curl;
CURLcode res;
FILE *hd_src;
struct stat file_info;
curl_off_t fsize;
struct curl_slist *headerlist=NULL;
static const char buf_1 [] = "RNFR " UPLOAD_FILE_AS;
@@ -81,9 +65,7 @@ int main(void)
printf("Couldnt open '%s': %s\n", LOCAL_FILE, strerror(errno));
return 1;
}
fsize = (curl_off_t)file_info.st_size;
printf("Local file size: %" CURL_FORMAT_CURL_OFF_T " bytes.\n", fsize);
printf("Local file size: %ld bytes.\n", file_info.st_size);
/* get a FILE * of the same file */
hd_src = fopen(LOCAL_FILE, "rb");
@@ -118,14 +100,10 @@ int main(void)
curl_off_t. If you use CURLOPT_INFILESIZE (without _LARGE) you must
make sure that to pass in a type 'long' argument. */
curl_easy_setopt(curl, CURLOPT_INFILESIZE_LARGE,
(curl_off_t)fsize);
(curl_off_t)file_info.st_size);
/* Now run off and do what you've been told! */
res = curl_easy_perform(curl);
/* Check for errors */
if(res != CURLE_OK)
fprintf(stderr, "curl_easy_perform() failed: %s\n",
curl_easy_strerror(res));
/* clean up the FTP commands list */
curl_slist_free_all (headerlist);
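For reference, the removed (newer) lines above use libcurl's portable large-file idiom; a minimal sketch of that pattern, assuming file_info was filled in by stat() as in the example:
  {
    curl_off_t fsize = (curl_off_t)file_info.st_size;
    /* CURL_FORMAT_CURL_OFF_T supplies the matching printf length modifier */
    printf("Local file size: %" CURL_FORMAT_CURL_OFF_T " bytes.\n", fsize);
    /* the _LARGE variant takes a curl_off_t, so files over 2 GB keep working */
    curl_easy_setopt(curl, CURLOPT_INFILESIZE_LARGE, fsize);
  }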

View File

@@ -1,25 +1,13 @@
/***************************************************************************
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2011, Daniel Stenberg, <daniel@haxx.se>, et al.
* $Id: ftpuploadresume.c,v 1.5 2008-08-31 12:12:35 yangtse Exp $
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
/* Upload to FTP, resuming failed transfers
* Upload to FTP, resuming failed transfers
*
* Compile for MinGW like this:
* gcc -Wall -pedantic -std=c99 ftpuploadwithresume.c -o ftpuploadresume.exe
@@ -39,136 +27,133 @@
/* The MinGW headers are missing a few Win32 function definitions,
you shouldn't need this if you use VC++ */
#if defined(__MINGW32__) && !defined(__MINGW64__)
#ifdef __MINGW32__
int __cdecl _snscanf(const char * input, size_t length, const char * format, ...);
#endif
/* parse headers for Content-Length */
size_t getcontentlengthfunc(void *ptr, size_t size, size_t nmemb, void *stream)
{
int r;
long len = 0;
size_t getcontentlengthfunc(void *ptr, size_t size, size_t nmemb, void *stream) {
int r;
long len = 0;
/* _snscanf() is Win32 specific */
r = _snscanf(ptr, size * nmemb, "Content-Length: %ld\n", &len);
/* _snscanf() is Win32 specific */
r = _snscanf(ptr, size * nmemb, "Content-Length: %ld\n", &len);
if (r) /* Microsoft: we don't read the specs */
*((long *) stream) = len;
if (r) /* Microsoft: we don't read the specs */
*((long *) stream) = len;
return size * nmemb;
return size * nmemb;
}
/* discard downloaded data */
size_t discardfunc(void *ptr, size_t size, size_t nmemb, void *stream)
{
return size * nmemb;
size_t discardfunc(void *ptr, size_t size, size_t nmemb, void *stream) {
return size * nmemb;
}
/* read data to upload */
size_t readfunc(void *ptr, size_t size, size_t nmemb, void *stream)
{
FILE *f = stream;
size_t n;
FILE *f = stream;
size_t n;
if (ferror(f))
return CURL_READFUNC_ABORT;
if (ferror(f))
return CURL_READFUNC_ABORT;
n = fread(ptr, size, nmemb, f) * size;
n = fread(ptr, size, nmemb, f) * size;
return n;
return n;
}
int upload(CURL *curlhandle, const char * remotepath, const char * localpath,
long timeout, long tries)
{
FILE *f;
long uploaded_len = 0;
CURLcode r = CURLE_GOT_NOTHING;
int c;
FILE *f;
long uploaded_len = 0;
CURLcode r = CURLE_GOT_NOTHING;
int c;
f = fopen(localpath, "rb");
if (f == NULL) {
perror(NULL);
return 0;
}
f = fopen(localpath, "rb");
if (f == NULL) {
perror(NULL);
return 0;
}
curl_easy_setopt(curlhandle, CURLOPT_UPLOAD, 1L);
curl_easy_setopt(curlhandle, CURLOPT_UPLOAD, 1L);
curl_easy_setopt(curlhandle, CURLOPT_URL, remotepath);
curl_easy_setopt(curlhandle, CURLOPT_URL, remotepath);
if (timeout)
curl_easy_setopt(curlhandle, CURLOPT_FTP_RESPONSE_TIMEOUT, timeout);
if (timeout)
curl_easy_setopt(curlhandle, CURLOPT_FTP_RESPONSE_TIMEOUT, timeout);
curl_easy_setopt(curlhandle, CURLOPT_HEADERFUNCTION, getcontentlengthfunc);
curl_easy_setopt(curlhandle, CURLOPT_HEADERDATA, &uploaded_len);
curl_easy_setopt(curlhandle, CURLOPT_HEADERFUNCTION, getcontentlengthfunc);
curl_easy_setopt(curlhandle, CURLOPT_HEADERDATA, &uploaded_len);
curl_easy_setopt(curlhandle, CURLOPT_WRITEFUNCTION, discardfunc);
curl_easy_setopt(curlhandle, CURLOPT_WRITEFUNCTION, discardfunc);
curl_easy_setopt(curlhandle, CURLOPT_READFUNCTION, readfunc);
curl_easy_setopt(curlhandle, CURLOPT_READDATA, f);
curl_easy_setopt(curlhandle, CURLOPT_READFUNCTION, readfunc);
curl_easy_setopt(curlhandle, CURLOPT_READDATA, f);
curl_easy_setopt(curlhandle, CURLOPT_FTPPORT, "-"); /* disable passive mode */
curl_easy_setopt(curlhandle, CURLOPT_FTP_CREATE_MISSING_DIRS, 1L);
curl_easy_setopt(curlhandle, CURLOPT_FTPPORT, "-"); /* disable passive mode */
curl_easy_setopt(curlhandle, CURLOPT_FTP_CREATE_MISSING_DIRS, 1L);
curl_easy_setopt(curlhandle, CURLOPT_VERBOSE, 1L);
curl_easy_setopt(curlhandle, CURLOPT_VERBOSE, 1L);
for (c = 0; (r != CURLE_OK) && (c < tries); c++) {
/* are we resuming? */
if (c) { /* yes */
/* determine the length of the file already written */
for (c = 0; (r != CURLE_OK) && (c < tries); c++) {
/* are we resuming? */
if (c) { /* yes */
/* determine the length of the file already written */
/*
* With NOBODY and NOHEADER, libcurl will issue a SIZE
* command, but the only way to retrieve the result is
* to parse the returned Content-Length header. Thus,
* getcontentlengthfunc(). We need discardfunc() above
* because HEADER will dump the headers to stdout
* without it.
*/
curl_easy_setopt(curlhandle, CURLOPT_NOBODY, 1L);
curl_easy_setopt(curlhandle, CURLOPT_HEADER, 1L);
/*
* With NOBODY and NOHEADER, libcurl will issue a SIZE
* command, but the only way to retrieve the result is
* to parse the returned Content-Length header. Thus,
* getcontentlengthfunc(). We need discardfunc() above
* because HEADER will dump the headers to stdout
* without it.
*/
curl_easy_setopt(curlhandle, CURLOPT_NOBODY, 1L);
curl_easy_setopt(curlhandle, CURLOPT_HEADER, 1L);
r = curl_easy_perform(curlhandle);
if (r != CURLE_OK)
continue;
r = curl_easy_perform(curlhandle);
if (r != CURLE_OK)
continue;
curl_easy_setopt(curlhandle, CURLOPT_NOBODY, 0L);
curl_easy_setopt(curlhandle, CURLOPT_HEADER, 0L);
curl_easy_setopt(curlhandle, CURLOPT_NOBODY, 0L);
curl_easy_setopt(curlhandle, CURLOPT_HEADER, 0L);
fseek(f, uploaded_len, SEEK_SET);
fseek(f, uploaded_len, SEEK_SET);
curl_easy_setopt(curlhandle, CURLOPT_APPEND, 1L);
}
else { /* no */
curl_easy_setopt(curlhandle, CURLOPT_APPEND, 0L);
}
curl_easy_setopt(curlhandle, CURLOPT_APPEND, 1L);
}
else { /* no */
curl_easy_setopt(curlhandle, CURLOPT_APPEND, 0L);
}
r = curl_easy_perform(curlhandle);
}
r = curl_easy_perform(curlhandle);
}
fclose(f);
fclose(f);
if (r == CURLE_OK)
return 1;
else {
fprintf(stderr, "%s\n", curl_easy_strerror(r));
return 0;
}
if (r == CURLE_OK)
return 1;
else {
fprintf(stderr, "%s\n", curl_easy_strerror(r));
return 0;
}
}
int main(int c, char **argv)
{
CURL *curlhandle = NULL;
int main(int c, char **argv) {
CURL *curlhandle = NULL;
curl_global_init(CURL_GLOBAL_ALL);
curlhandle = curl_easy_init();
curl_global_init(CURL_GLOBAL_ALL);
curlhandle = curl_easy_init();
upload(curlhandle, "ftp://user:pass@example.com/path/file", "C:\\file", 0, 3);
upload(curlhandle, "ftp://user:pass@host/path/file", "C:\\file", 0, 3);
curl_easy_cleanup(curlhandle);
curl_global_cleanup();
curl_easy_cleanup(curlhandle);
curl_global_cleanup();
return 0;
return 0;
}
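The Content-Length parsing above depends on the Win32-only _snscanf(). A portable sketch of the same idea, assuming <string.h> is available; the 128-byte buffer size is an arbitrary choice:
size_t getcontentlengthfunc(void *ptr, size_t size, size_t nmemb, void *stream)
{
  char buf[128];
  size_t len = size * nmemb;
  long clen = 0;

  /* header lines are not NUL-terminated, so copy into a bounded buffer */
  if(len >= sizeof(buf))
    len = sizeof(buf) - 1;
  memcpy(buf, ptr, len);
  buf[len] = '\0';

  if(sscanf(buf, "Content-Length: %ld", &clen) == 1)
    *((long *)stream) = clen;

  return size * nmemb;
}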

View File

@@ -1,24 +1,13 @@
/***************************************************************************
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2011, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
* $Id: getinfo.c,v 1.2 2004/11/22 16:24:46 bagder Exp $
*/
#include <stdio.h>
#include <curl/curl.h>
@@ -31,7 +20,7 @@ int main(void)
curl = curl_easy_init();
if(curl) {
/* http://curl.haxx.se/libcurl/c/curl_easy_setopt.html#CURLOPTURL */
curl_easy_setopt(curl, CURLOPT_URL, "http://www.example.com/");
curl_easy_setopt(curl, CURLOPT_URL, "curl.haxx.se");
/* http://curl.haxx.se/libcurl/c/curl_easy_perform.html */
res = curl_easy_perform(curl);
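This hunk does not show which fields getinfo.c actually queries, so purely as an illustration, two common curl_easy_getinfo() calls after a successful perform look like this:
  if(res == CURLE_OK) {
    long response_code = 0;
    double total_time = 0.0;
    /* CURLINFO_RESPONSE_CODE expects a long *, CURLINFO_TOTAL_TIME a double * */
    curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &response_code);
    curl_easy_getinfo(curl, CURLINFO_TOTAL_TIME, &total_time);
    printf("response %ld in %.2f seconds\n", response_code, total_time);
  }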

View File

@@ -1,26 +1,16 @@
/***************************************************************************
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2011, Daniel Stenberg, <daniel@haxx.se>, et al.
* $Id: getinmemory.c,v 1.13 2008-09-06 04:28:45 yangtse Exp $
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
/* Example source code to show how the callback function can be used to
* Example source code to show how the callback function can be used to
* download data into a chunk of memory instead of storing it in a file.
*
* This exact source code has not been verified to work.
*/
#include <stdio.h>
@@ -28,41 +18,48 @@
#include <string.h>
#include <curl/curl.h>
#include <curl/types.h>
#include <curl/easy.h>
struct MemoryStruct {
char *memory;
size_t size;
};
static void *myrealloc(void *ptr, size_t size);
static void *myrealloc(void *ptr, size_t size)
{
/* There might be a realloc() out there that doesn't like reallocing
NULL pointers, so we take care of it here */
if(ptr)
return realloc(ptr, size);
else
return malloc(size);
}
static size_t
WriteMemoryCallback(void *contents, size_t size, size_t nmemb, void *userp)
WriteMemoryCallback(void *ptr, size_t size, size_t nmemb, void *data)
{
size_t realsize = size * nmemb;
struct MemoryStruct *mem = (struct MemoryStruct *)userp;
struct MemoryStruct *mem = (struct MemoryStruct *)data;
mem->memory = realloc(mem->memory, mem->size + realsize + 1);
if (mem->memory == NULL) {
/* out of memory! */
printf("not enough memory (realloc returned NULL)\n");
exit(EXIT_FAILURE);
mem->memory = myrealloc(mem->memory, mem->size + realsize + 1);
if (mem->memory) {
memcpy(&(mem->memory[mem->size]), ptr, realsize);
mem->size += realsize;
mem->memory[mem->size] = 0;
}
memcpy(&(mem->memory[mem->size]), contents, realsize);
mem->size += realsize;
mem->memory[mem->size] = 0;
return realsize;
}
int main(void)
int main(int argc, char **argv)
{
CURL *curl_handle;
struct MemoryStruct chunk;
chunk.memory = malloc(1); /* will be grown as needed by the realloc above */
chunk.memory=NULL; /* we expect realloc(NULL, size) to work */
chunk.size = 0; /* no data at this point */
curl_global_init(CURL_GLOBAL_ALL);
@@ -71,7 +68,7 @@ int main(void)
curl_handle = curl_easy_init();
/* specify URL to get */
curl_easy_setopt(curl_handle, CURLOPT_URL, "http://www.example.com/");
curl_easy_setopt(curl_handle, CURLOPT_URL, "http://cool.haxx.se/");
/* send all data to this function */
curl_easy_setopt(curl_handle, CURLOPT_WRITEFUNCTION, WriteMemoryCallback);
@@ -100,8 +97,6 @@ int main(void)
* you're done with it, you should free() it as a nice application.
*/
printf("%lu bytes retrieved\n", (long)chunk.size);
if(chunk.memory)
free(chunk.memory);
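A middle ground between the two callback versions shown above keeps the restored myrealloc() helper but still reports allocation failure to libcurl: returning anything other than size * nmemb makes the transfer fail with CURLE_WRITE_ERROR. A sketch under those assumptions:
static size_t
WriteMemoryCallback(void *ptr, size_t size, size_t nmemb, void *data)
{
  size_t realsize = size * nmemb;
  struct MemoryStruct *mem = (struct MemoryStruct *)data;
  char *grown = myrealloc(mem->memory, mem->size + realsize + 1);

  if(!grown)
    return 0; /* short return => libcurl aborts with CURLE_WRITE_ERROR */

  mem->memory = grown;
  memcpy(&(mem->memory[mem->size]), ptr, realsize);
  mem->size += realsize;
  mem->memory[mem->size] = 0;
  return realsize;
}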

View File

@@ -1,25 +1,13 @@
/***************************************************************************
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2011, Daniel Stenberg, <daniel@haxx.se>, et al.
* $Id: ghiper.c,v 1.5 2008-05-22 21:20:09 danf Exp $
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
/* Example application source code using the multi socket interface to
* Example application source code using the multi socket interface to
* download many files at once.
*
* Written by Jeff Pohlmeyer
@@ -71,7 +59,10 @@ callback.
typedef struct _GlobalInfo {
CURLM *multi;
guint timer_event;
int prev_running;
int still_running;
int requested; /* count: curl_easy_init() */
int completed; /* count: curl_easy_cleanup() */
} GlobalInfo;
@@ -105,6 +96,7 @@ static void mcode_or_die(const char *where, CURLMcode code) {
const char *s;
switch (code) {
case CURLM_CALL_MULTI_PERFORM: s="CURLM_CALL_MULTI_PERFORM"; break;
case CURLM_OK: s="CURLM_OK"; break;
case CURLM_BAD_HANDLE: s="CURLM_BAD_HANDLE"; break;
case CURLM_BAD_EASY_HANDLE: s="CURLM_BAD_EASY_HANDLE"; break;
case CURLM_OUT_OF_MEMORY: s="CURLM_OUT_OF_MEMORY"; break;
@@ -122,43 +114,62 @@ static void mcode_or_die(const char *where, CURLMcode code) {
/* Check for completed transfers, and remove their easy handles */
static void check_multi_info(GlobalInfo *g)
static void check_run_count(GlobalInfo *g)
{
char *eff_url;
CURLMsg *msg;
int msgs_left;
ConnInfo *conn;
CURL *easy;
CURLcode res;
if (g->prev_running > g->still_running) {
char *eff_url=NULL;
CURLMsg *msg;
int msgs_left;
ConnInfo *conn=NULL;
CURL*easy;
CURLcode res;
MSG_OUT("REMAINING: %d\n", g->still_running);
while ((msg = curl_multi_info_read(g->multi, &msgs_left))) {
if (msg->msg == CURLMSG_DONE) {
easy = msg->easy_handle;
res = msg->data.result;
curl_easy_getinfo(easy, CURLINFO_PRIVATE, &conn);
curl_easy_getinfo(easy, CURLINFO_EFFECTIVE_URL, &eff_url);
MSG_OUT("DONE: %s => (%d) %s\n", eff_url, res, conn->error);
curl_multi_remove_handle(g->multi, easy);
free(conn->url);
curl_easy_cleanup(easy);
free(conn);
}
MSG_OUT("REMAINING: %d\n", g->still_running);
/*
I am still uncertain whether it is safe to remove an easy handle
from inside the curl_multi_info_read loop, so here I will search
for completed transfers in the inner "while" loop, and then remove
them in the outer "do-while" loop...
*/
do {
easy=NULL;
while ((msg = curl_multi_info_read(g->multi, &msgs_left))) {
if (msg->msg == CURLMSG_DONE) {
easy=msg->easy_handle;
res=msg->data.result;
break;
}
}
if (easy) {
curl_easy_getinfo(easy, CURLINFO_PRIVATE, &conn);
curl_easy_getinfo(easy, CURLINFO_EFFECTIVE_URL, &eff_url);
MSG_OUT("DONE: %s => (%d) %s\n", eff_url, res, conn->error);
curl_multi_remove_handle(g->multi, easy);
g_free(conn->url);
curl_easy_cleanup(easy);
g_free(conn);
g->completed++;
}
} while ( easy );
MSG_OUT("Requested: %d Completed:%d\n", g->requested, g->completed);
}
g->prev_running = g->still_running;
}
/* Called by glib when our timeout expires */
static gboolean timer_cb(gpointer data)
{
GlobalInfo *g = (GlobalInfo *)data;
CURLMcode rc;
rc = curl_multi_socket_action(g->multi,
CURL_SOCKET_TIMEOUT, 0, &g->still_running);
mcode_or_die("timer_cb: curl_multi_socket_action", rc);
check_multi_info(g);
do {
rc = curl_multi_socket(g->multi, CURL_SOCKET_TIMEOUT, &g->still_running);
} while (rc == CURLM_CALL_MULTI_PERFORM);
mcode_or_die("timer_cb: curl_multi_socket", rc);
check_run_count(g);
return FALSE;
}
@@ -188,15 +199,11 @@ static gboolean event_cb(GIOChannel *ch, GIOCondition condition, gpointer data)
GlobalInfo *g = (GlobalInfo*) data;
CURLMcode rc;
int fd=g_io_channel_unix_get_fd(ch);
int action =
(condition & G_IO_IN ? CURL_CSELECT_IN : 0) |
(condition & G_IO_OUT ? CURL_CSELECT_OUT : 0);
rc = curl_multi_socket_action(g->multi, fd, action, &g->still_running);
mcode_or_die("event_cb: curl_multi_socket_action", rc);
check_multi_info(g);
do {
rc = curl_multi_socket(g->multi, fd, &g->still_running);
} while (rc == CURLM_CALL_MULTI_PERFORM);
mcode_or_die("event_cb: curl_multi_socket", rc);
check_run_count(g);
if(g->still_running) {
return TRUE;
} else {
@@ -332,9 +339,12 @@ static void new_conn(char *url, GlobalInfo *g )
MSG_OUT("Adding easy %p to multi %p (%s)\n", conn->easy, g->multi, url);
rc =curl_multi_add_handle(g->multi, conn->easy);
mcode_or_die("new_conn: curl_multi_add_handle", rc);
/* note that the add_handle() will set a time-out to trigger very soon so
that the necessary socket_action() call will be called by this app */
g->requested++;
do {
rc = curl_multi_socket_all(g->multi, &g->still_running);
} while (CURLM_CALL_MULTI_PERFORM == rc);
mcode_or_die("new_conn: curl_multi_socket_all", rc);
check_run_count(g);
}
@@ -442,10 +452,9 @@ int main(int argc, char **argv)
curl_multi_setopt(g->multi, CURLMOPT_SOCKETDATA, g);
curl_multi_setopt(g->multi, CURLMOPT_TIMERFUNCTION, update_timeout_cb);
curl_multi_setopt(g->multi, CURLMOPT_TIMERDATA, g);
/* we don't call any curl_multi_socket*() function yet as we have no handles
added! */
do {
rc = curl_multi_socket_all(g->multi, &g->still_running);
} while (CURLM_CALL_MULTI_PERFORM == rc);
g_main_loop_run(gmain);
curl_multi_cleanup(g->multi);
return 0;
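The hunk sets CURLMOPT_TIMERFUNCTION to update_timeout_cb() without showing its body; a sketch of what such a glib timer callback typically does (the field names follow the GlobalInfo struct above, everything else is an assumption about the full file, not a quote from it):
static int update_timeout_cb(CURLM *multi, long timeout_ms, void *userp)
{
  GlobalInfo *g = (GlobalInfo *)userp;
  (void)multi;

  /* libcurl asks us to (re)arm a single-shot timer; -1 means drop it */
  if(g->timer_event)
    g_source_remove(g->timer_event);
  g->timer_event = 0;

  if(timeout_ms >= 0)
    g->timer_event = g_timeout_add(timeout_ms, timer_cb, g);
  return 0;
}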

View File

@@ -1,28 +1,16 @@
/***************************************************************************
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2011, Daniel Stenberg, <daniel@haxx.se>, et al.
* $Id: hiperfifo.c,v 1.7 2008-05-22 21:20:09 danf Exp $
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
* Example application source code using the multi socket interface to
* download many files at once.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
/* Example application source code using the multi socket interface to
download many files at once.
Written by Jeff Pohlmeyer
* Written by Jeff Pohlmeyer
Requires libevent and a (POSIX?) system that has mkfifo().
@@ -75,6 +63,7 @@ typedef struct _GlobalInfo {
struct event fifo_event;
struct event timer_event;
CURLM *multi;
int prev_running;
int still_running;
FILE* input;
} GlobalInfo;
@@ -122,6 +111,7 @@ static void mcode_or_die(const char *where, CURLMcode code)
const char *s;
switch (code) {
case CURLM_CALL_MULTI_PERFORM: s="CURLM_CALL_MULTI_PERFORM"; break;
case CURLM_OK: s="CURLM_OK"; break;
case CURLM_BAD_HANDLE: s="CURLM_BAD_HANDLE"; break;
case CURLM_BAD_EASY_HANDLE: s="CURLM_BAD_EASY_HANDLE"; break;
case CURLM_OUT_OF_MEMORY: s="CURLM_OUT_OF_MEMORY"; break;
@@ -143,29 +133,44 @@ static void mcode_or_die(const char *where, CURLMcode code)
/* Check for completed transfers, and remove their easy handles */
static void check_multi_info(GlobalInfo *g)
static void check_run_count(GlobalInfo *g)
{
char *eff_url;
CURLMsg *msg;
int msgs_left;
ConnInfo *conn;
CURL *easy;
CURLcode res;
if (g->prev_running > g->still_running) {
char *eff_url=NULL;
CURLMsg *msg;
int msgs_left;
ConnInfo *conn=NULL;
CURL*easy;
CURLcode res;
fprintf(MSG_OUT, "REMAINING: %d\n", g->still_running);
while ((msg = curl_multi_info_read(g->multi, &msgs_left))) {
if (msg->msg == CURLMSG_DONE) {
easy = msg->easy_handle;
res = msg->data.result;
curl_easy_getinfo(easy, CURLINFO_PRIVATE, &conn);
curl_easy_getinfo(easy, CURLINFO_EFFECTIVE_URL, &eff_url);
fprintf(MSG_OUT, "DONE: %s => (%d) %s\n", eff_url, res, conn->error);
curl_multi_remove_handle(g->multi, easy);
free(conn->url);
curl_easy_cleanup(easy);
free(conn);
}
fprintf(MSG_OUT, "REMAINING: %d\n", g->still_running);
/*
I am still uncertain whether it is safe to remove an easy handle
from inside the curl_multi_info_read loop, so here I will search
for completed transfers in the inner "while" loop, and then remove
them in the outer "do-while" loop...
*/
do {
easy=NULL;
while ((msg = curl_multi_info_read(g->multi, &msgs_left))) {
if (msg->msg == CURLMSG_DONE) {
easy=msg->easy_handle;
res=msg->data.result;
break;
}
}
if (easy) {
curl_easy_getinfo(easy, CURLINFO_PRIVATE, &conn);
curl_easy_getinfo(easy, CURLINFO_EFFECTIVE_URL, &eff_url);
fprintf(MSG_OUT, "DONE: %s => (%d) %s\n", eff_url, res, conn->error);
curl_multi_remove_handle(g->multi, easy);
free(conn->url);
curl_easy_cleanup(easy);
free(conn);
}
} while ( easy );
}
g->prev_running = g->still_running;
}
@@ -175,15 +180,13 @@ static void event_cb(int fd, short kind, void *userp)
{
GlobalInfo *g = (GlobalInfo*) userp;
CURLMcode rc;
(void)kind; /* unused */
int action =
(kind & EV_READ ? CURL_CSELECT_IN : 0) |
(kind & EV_WRITE ? CURL_CSELECT_OUT : 0);
rc = curl_multi_socket_action(g->multi, fd, action, &g->still_running);
mcode_or_die("event_cb: curl_multi_socket_action", rc);
check_multi_info(g);
do {
rc = curl_multi_socket(g->multi, fd, &g->still_running);
} while (rc == CURLM_CALL_MULTI_PERFORM);
mcode_or_die("event_cb: curl_multi_socket", rc);
check_run_count(g);
if ( g->still_running <= 0 ) {
fprintf(MSG_OUT, "last transfer done, kill timeout\n");
if (evtimer_pending(&g->timer_event, NULL)) {
@@ -202,10 +205,11 @@ static void timer_cb(int fd, short kind, void *userp)
(void)fd;
(void)kind;
rc = curl_multi_socket_action(g->multi,
CURL_SOCKET_TIMEOUT, 0, &g->still_running);
mcode_or_die("timer_cb: curl_multi_socket_action", rc);
check_multi_info(g);
do {
rc = curl_multi_socket(g->multi, CURL_SOCKET_TIMEOUT, &g->still_running);
} while (rc == CURLM_CALL_MULTI_PERFORM);
mcode_or_die("timer_cb: curl_multi_socket", rc);
check_run_count(g);
}
@@ -331,11 +335,13 @@ static void new_conn(char *url, GlobalInfo *g )
curl_easy_setopt(conn->easy, CURLOPT_PROGRESSDATA, conn);
fprintf(MSG_OUT,
"Adding easy %p to multi %p (%s)\n", conn->easy, g->multi, url);
rc = curl_multi_add_handle(g->multi, conn->easy);
rc =curl_multi_add_handle(g->multi, conn->easy);
mcode_or_die("new_conn: curl_multi_add_handle", rc);
/* note that the add_handle() will set a time-out to trigger very soon so
that the necessary socket_action() call will be called by this app */
do {
rc = curl_multi_socket_all(g->multi, &g->still_running);
} while (CURLM_CALL_MULTI_PERFORM == rc);
mcode_or_die("new_conn: curl_multi_socket_all", rc);
check_run_count(g);
}
/* This gets called whenever data is received from the fifo */
@@ -363,7 +369,7 @@ static int init_fifo (GlobalInfo *g)
{
struct stat st;
static const char *fifo = "hiper.fifo";
curl_socket_t sockfd;
int sockfd;
fprintf(MSG_OUT, "Creating named pipe \"%s\"\n", fifo);
if (lstat (fifo, &st) == 0) {
@@ -394,6 +400,7 @@ static int init_fifo (GlobalInfo *g)
int main(int argc, char **argv)
{
GlobalInfo g;
CURLMcode rc;
(void)argc;
(void)argv;
@@ -402,16 +409,13 @@ int main(int argc, char **argv)
init_fifo(&g);
g.multi = curl_multi_init();
evtimer_set(&g.timer_event, timer_cb, &g);
/* setup the generic multi interface options we want */
curl_multi_setopt(g.multi, CURLMOPT_SOCKETFUNCTION, sock_cb);
curl_multi_setopt(g.multi, CURLMOPT_SOCKETDATA, &g);
curl_multi_setopt(g.multi, CURLMOPT_TIMERFUNCTION, multi_timer_cb);
curl_multi_setopt(g.multi, CURLMOPT_TIMERDATA, &g);
/* we don't call any curl_multi_socket*() function yet as we have no handles
added! */
do {
rc = curl_multi_socket_all(g.multi, &g.still_running);
} while (CURLM_CALL_MULTI_PERFORM == rc);
event_dispatch();
curl_multi_cleanup(g.multi);
return 0;
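Similarly, sock_cb() is registered above but not shown in this hunk; a self-contained sketch of such a CURLMOPT_SOCKETFUNCTION for libevent 1.x follows (the helper-free structure and per-socket handling are assumptions for illustration, not the file's actual code; <stdlib.h> is assumed for calloc/free):
static int sock_cb(CURL *e, curl_socket_t s, int what, void *cbp, void *sockp)
{
  GlobalInfo *g = (GlobalInfo *)cbp;
  struct event *ev = (struct event *)sockp; /* stored via curl_multi_assign() */
  (void)e;

  if(what == CURL_POLL_REMOVE) {
    if(ev) {
      event_del(ev);
      free(ev);
      curl_multi_assign(g->multi, s, NULL);
    }
  }
  else {
    short kind = (what & CURL_POLL_IN ? EV_READ : 0) |
                 (what & CURL_POLL_OUT ? EV_WRITE : 0) | EV_PERSIST;
    if(!ev) {
      ev = calloc(1, sizeof(struct event));
      curl_multi_assign(g->multi, s, ev);
    }
    else
      event_del(ev);
    /* re-register interest with the global libevent base used by event_dispatch() */
    event_set(ev, s, kind, event_cb, g);
    event_add(ev, NULL);
  }
  return 0;
}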

View File

@@ -1,86 +0,0 @@
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 2012, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
/*
* This example uses the "Streaming HTML parser" to extract the href pieces in
* a streaming manner from a downloaded HTML. Kindly donated by Michał
* Kowalczyk.
*
* The parser is found at
* http://code.google.com/p/htmlstreamparser/
*/
#include <stdio.h>
#include <curl/curl.h>
#include <htmlstreamparser.h>
static size_t write_callback(void *buffer, size_t size, size_t nmemb,
void *hsp)
{
size_t realsize = size * nmemb, p;
for (p = 0; p < realsize; p++) {
html_parser_char_parse(hsp, ((char *)buffer)[p]);
if (html_parser_cmp_tag(hsp, "a", 1))
if (html_parser_cmp_attr(hsp, "href", 4))
if (html_parser_is_in(hsp, HTML_VALUE_ENDED)) {
html_parser_val(hsp)[html_parser_val_length(hsp)] = '\0';
printf("%s\n", html_parser_val(hsp));
}
}
return realsize;
}
int main(int argc, char *argv[])
{
char tag[1], attr[4], val[128];
CURL *curl;
HTMLSTREAMPARSER *hsp;
if (argc != 2) {
printf("Usage: %s URL\n", argv[0]);
return EXIT_FAILURE;
}
curl = curl_easy_init();
hsp = html_parser_init();
html_parser_set_tag_to_lower(hsp, 1);
html_parser_set_attr_to_lower(hsp, 1);
html_parser_set_tag_buffer(hsp, tag, sizeof(tag));
html_parser_set_attr_buffer(hsp, attr, sizeof(attr));
html_parser_set_val_buffer(hsp, val, sizeof(val)-1);
curl_easy_setopt(curl, CURLOPT_URL, argv[1]);
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, write_callback);
curl_easy_setopt(curl, CURLOPT_WRITEDATA, hsp);
curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1);
curl_easy_perform(curl);
curl_easy_cleanup(curl);
html_parser_cleanup(hsp);
return EXIT_SUCCESS;
}
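One hedged refinement to the setup above: curl_easy_setopt() expects a long for these boolean/number options, and capping redirects avoids loops (the limit of 10 is an arbitrary choice):
  curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L); /* long, not int */
  curl_easy_setopt(curl, CURLOPT_MAXREDIRS, 10L);     /* bound redirect chains */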

View File

@@ -1,25 +1,13 @@
/***************************************************************************
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2011, Daniel Stenberg, <daniel@haxx.se>, et al.
* $Id: htmltidy.c,v 1.2 2008-05-22 21:20:09 danf Exp $
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
/* Download a document and use libtidy to parse the HTML.
* Download a document and use libtidy to parse the HTML.
* Written by Jeff Pohlmeyer
*
* LibTidy => http://tidy.sourceforge.net

View File

@@ -1,24 +1,13 @@
/***************************************************************************
/*****************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) 1998 - 2011, Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at http://curl.haxx.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
***************************************************************************/
* $Id: htmltitle.cc,v 1.4 2008-05-22 21:20:09 danf Exp $
*/
// Get a web page, parse it with libxml.
//
// Written by Lars Nilsson
@@ -153,7 +142,6 @@ static void StartElement(void *voidContext,
context->title = "";
context->addTitle = true;
}
(void) attributes;
}
//

Some files were not shown because too many files have changed in this diff.